/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>
#include <rte_tailq.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
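/*
 * For reference (standard 802.1Q TCI layout, assumed here): the 16-bit
 * TCI is PCP(3) | DEI(1) | VID(12), so the PCP sits in bits 15:13 and
 * the VID in bits 11:0, which is what the shift and masks above encode.
 */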

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

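/*
 * Return the E-Switch manager vport id of the device: 0xfffe (presumably
 * the embedded-CPU ECPF vport) on the BlueField parts listed below,
 * 0 (the PF vport) otherwise.
 */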
static int16_t
flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (priv->pci_dev == NULL)
                return 0;
        switch (priv->pci_dev->id.device_id) {
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
                return (int16_t)0xfffe;
        default:
                return 0;
        }
}

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is a suffix flow
         * and the layer flags were set by the prefix flow. Use the prefix
         * flow's layer flags, since the flow was split and the suffix flow
         * may not carry the user-defined items.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/*
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static inline int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

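/*
 * The tables below map protocol header fields to hardware modification
 * field ids. Note the 48-bit MAC addresses are split into 4-byte and
 * 2-byte chunks (the _47_16/_15_0 id suffixes), since one modification
 * command carries at most 32 bits of data.
 */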
struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size of this field is in bits, not bytes! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

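/*
 * Return the hash list pointed to by *phl, lazily creating it on first
 * use. Thread-safe: if the compare-and-swap below loses a race with
 * another thread, the freshly created list is destroyed and the winner's
 * list is returned instead.
 */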
static inline struct mlx5_hlist *
flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
                     const char *name, uint32_t size, bool direct_key,
                     bool lcores_share, void *ctx,
                     mlx5_list_create_cb cb_create,
                     mlx5_list_match_cb cb_match,
                     mlx5_list_remove_cb cb_remove,
                     mlx5_list_clone_cb cb_clone,
                     mlx5_list_clone_free_cb cb_clone_free)
{
        struct mlx5_hlist *hl;
        struct mlx5_hlist *expected = NULL;
        char s[MLX5_NAME_SIZE];

        hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        if (likely(hl))
                return hl;
        snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
        hl = mlx5_hlist_create(s, size, direct_key, lcores_share,
                        ctx, cb_create, cb_match, cb_remove, cb_clone,
                        cb_clone_free);
        if (!hl) {
                DRV_LOG(ERR, "%s hash creation failed", name);
                rte_errno = ENOMEM;
                return NULL;
        }
        if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
                                         __ATOMIC_SEQ_CST,
                                         __ATOMIC_SEQ_CST)) {
                mlx5_hlist_destroy(hl);
                hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        }
        return hl;
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;
        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   converted field in host endian format.
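 *
 *   E.g. data = {0x12, 0x34, 0x56} with size 3 returns 0x123456.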
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}

/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t carry_b = 0;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                uint32_t size_b;
                uint32_t off_b;
                uint32_t mask;
                uint32_t data;
                bool next_field = true;
                bool next_dcopy = true;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
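                /* E.g. mask 0x00fff000 (carry_b 0): off_b = 12, size_b = 12. */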
                off_b = rte_bsf32(mask) + carry_b;
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
                                0 : size_b,
                };
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        /*
                         * Destination field overflow. Copy leftovers of
                         * a source field to the next destination field.
                         */
                        carry_b = 0;
                        if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
                            dcopy->size != 0) {
                                actions[i].length =
                                        dcopy->size * CHAR_BIT - dcopy->offset;
                                carry_b = actions[i].length;
                                next_field = false;
                        }
                        /*
                         * Not enough bits in a source field to fill a
                         * destination field. Switch to the next source.
                         */
                        if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
                            (size_b == field->size * CHAR_BIT - off_b)) {
                                actions[i].length =
                                        field->size * CHAR_BIT - off_b;
                                dcopy->offset += actions[i].length;
                                next_dcopy = false;
                        }
                        if (next_dcopy)
                                ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (next_field)
                        ++field;
                ++i;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using increments,
                 * add UINT32_MAX to Y X times: each addition of
                 * UINT32_MAX decrements Y by 1 modulo 2^32.
                 */
                value *= UINT32_MAX;
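        /*
         * E.g. X = 3: value = 3 * 0xFFFFFFFF and the uint32_t truncation
         * below yields 0xFFFFFFFD, i.e. -3 modulo 2^32.
         */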
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using increments,
                 * add UINT32_MAX to Y X times: each addition of
                 * UINT32_MAX decrements Y by 1 modulo 2^32.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
                .offset = conf->offset,
                .length = conf->length,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        mask = rte_cpu_to_be_32(reg_c0 >> reg_dst.offset);
                } else {
                        reg_dst.offset = 0;
                        mask = rte_cpu_to_be_32(reg_c0);
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
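        /*
         * Only part of reg_c[0] may be usable for the mark, as reported
         * via dv_regc0_mask; e.g. with msk_c0 = 0xffff0000, shl_c0 is 16
         * and the value and mask below are shifted into that upper half.
         */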
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t mask = rte_cpu_to_be_32(conf->mask);
        uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
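        /*
         * RTE_IPV4_HDR_DSCP_MASK covers the upper six (DSCP) bits of the
         * ToS byte; shifting it right by 2 byte-aligns the 6-bit mask at
         * bits 5:0, as explained for the IPv6 variant below.
         */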
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1371         /*
1372          * Even though the DSCP bits offset of IPv6 is not byte aligned,
1373          * rdma-core only accept the DSCP bits byte aligned start from
1374          * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1375          * bits in IPv6 case as rdma-core requires byte aligned value.
1376          */
1377         ipv6.hdr.vtc_flow = conf->dscp;
1378         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1379         item.spec = &ipv6;
1380         item.mask = &ipv6_mask;
1381         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1382                                              MLX5_MODIFICATION_TYPE_SET, error);
1383 }
1384
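/*
 * Worked example of the mask arithmetic above: in the IPv6 vtc_flow word
 * (version:4 | traffic class:8 | flow label:20) the DSCP bits occupy bit
 * positions 27..22, so RTE_IPV6_HDR_DSCP_MASK == 0x0fc00000 and
 * 0x0fc00000 >> 22 == 0x3f, the same byte-aligned 6-bit mask produced for
 * IPv4 by RTE_IPV4_HDR_DSCP_MASK >> 2 == 0xfc >> 2 == 0x3f.
 */
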
static int
mlx5_flow_item_field_width(struct rte_eth_dev *dev,
			   enum rte_flow_field_id field, int inherit,
			   const struct rte_flow_attr *attr,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (field) {
	case RTE_FLOW_FIELD_START:
		return 32;
	case RTE_FLOW_FIELD_MAC_DST:
	case RTE_FLOW_FIELD_MAC_SRC:
		return 48;
	case RTE_FLOW_FIELD_VLAN_TYPE:
		return 16;
	case RTE_FLOW_FIELD_VLAN_ID:
		return 12;
	case RTE_FLOW_FIELD_MAC_TYPE:
		return 16;
	case RTE_FLOW_FIELD_IPV4_DSCP:
		return 6;
	case RTE_FLOW_FIELD_IPV4_TTL:
		return 8;
	case RTE_FLOW_FIELD_IPV4_SRC:
	case RTE_FLOW_FIELD_IPV4_DST:
		return 32;
	case RTE_FLOW_FIELD_IPV6_DSCP:
		return 6;
	case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
		return 8;
	case RTE_FLOW_FIELD_IPV6_SRC:
	case RTE_FLOW_FIELD_IPV6_DST:
		return 128;
	case RTE_FLOW_FIELD_TCP_PORT_SRC:
	case RTE_FLOW_FIELD_TCP_PORT_DST:
		return 16;
	case RTE_FLOW_FIELD_TCP_SEQ_NUM:
	case RTE_FLOW_FIELD_TCP_ACK_NUM:
		return 32;
	case RTE_FLOW_FIELD_TCP_FLAGS:
		return 9;
	case RTE_FLOW_FIELD_UDP_PORT_SRC:
	case RTE_FLOW_FIELD_UDP_PORT_DST:
		return 16;
	case RTE_FLOW_FIELD_VXLAN_VNI:
	case RTE_FLOW_FIELD_GENEVE_VNI:
		return 24;
	case RTE_FLOW_FIELD_GTP_TEID:
	case RTE_FLOW_FIELD_TAG:
		return 32;
	case RTE_FLOW_FIELD_MARK:
		return __builtin_popcount(priv->sh->dv_mark_mask);
	case RTE_FLOW_FIELD_META:
		return (flow_dv_get_metadata_reg(dev, attr, error) == REG_C_0) ?
			__builtin_popcount(priv->sh->dv_meta_mask) : 32;
	case RTE_FLOW_FIELD_POINTER:
	case RTE_FLOW_FIELD_VALUE:
		return inherit < 0 ? 0 : inherit;
	default:
		MLX5_ASSERT(false);
	}
	return 0;
}

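/*
 * Example (mask values assumed): with a dv_mark_mask of 0x00ffffff the MARK
 * field reports __builtin_popcount(0x00ffffff) == 24 usable bits, while META
 * reports the full 32 bits unless it is mapped to REG_C_0, in which case
 * only the bits of dv_meta_mask are available.
 */
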
static void
mlx5_flow_field_id_to_modify_info
		(const struct rte_flow_action_modify_data *data,
		 struct field_modify_info *info, uint32_t *mask,
		 uint32_t width, uint32_t *shift, struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t idx = 0;
	uint32_t off = 0;

	switch (data->field) {
	case RTE_FLOW_FIELD_START:
		/* not supported yet */
		MLX5_ASSERT(false);
		break;
	case RTE_FLOW_FIELD_MAC_DST:
		off = data->offset > 16 ? data->offset - 16 : 0;
		if (mask) {
			if (data->offset < 16) {
				info[idx] = (struct field_modify_info){2, 4,
						MLX5_MODI_OUT_DMAC_15_0};
				if (width < 16) {
					mask[1] = rte_cpu_to_be_16(0xffff >>
								 (16 - width));
					width = 0;
				} else {
					mask[1] = RTE_BE16(0xffff);
					width -= 16;
				}
				if (!width)
					break;
				++idx;
			}
			info[idx] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_DMAC_47_16};
			mask[0] = rte_cpu_to_be_32((0xffffffff >>
						    (32 - width)) << off);
		} else {
			if (data->offset < 16)
				info[idx++] = (struct field_modify_info){2, 0,
						MLX5_MODI_OUT_DMAC_15_0};
			info[idx] = (struct field_modify_info){4, off,
						MLX5_MODI_OUT_DMAC_47_16};
		}
		break;
	case RTE_FLOW_FIELD_MAC_SRC:
		off = data->offset > 16 ? data->offset - 16 : 0;
		if (mask) {
			if (data->offset < 16) {
				info[idx] = (struct field_modify_info){2, 4,
						MLX5_MODI_OUT_SMAC_15_0};
				if (width < 16) {
					mask[1] = rte_cpu_to_be_16(0xffff >>
								 (16 - width));
					width = 0;
				} else {
					mask[1] = RTE_BE16(0xffff);
					width -= 16;
				}
				if (!width)
					break;
				++idx;
			}
			info[idx] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_SMAC_47_16};
			mask[0] = rte_cpu_to_be_32((0xffffffff >>
						    (32 - width)) << off);
		} else {
			if (data->offset < 16)
				info[idx++] = (struct field_modify_info){2, 0,
						MLX5_MODI_OUT_SMAC_15_0};
			info[idx] = (struct field_modify_info){4, off,
						MLX5_MODI_OUT_SMAC_47_16};
		}
		break;
	case RTE_FLOW_FIELD_VLAN_TYPE:
		/* not supported yet */
		break;
	case RTE_FLOW_FIELD_VLAN_ID:
		info[idx] = (struct field_modify_info){2, 0,
					MLX5_MODI_OUT_FIRST_VID};
		if (mask)
			mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
		break;
	case RTE_FLOW_FIELD_MAC_TYPE:
		info[idx] = (struct field_modify_info){2, 0,
					MLX5_MODI_OUT_ETHERTYPE};
		if (mask)
			mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
		break;
	case RTE_FLOW_FIELD_IPV4_DSCP:
		info[idx] = (struct field_modify_info){1, 0,
					MLX5_MODI_OUT_IP_DSCP};
		if (mask)
			mask[idx] = 0x3f >> (6 - width);
		break;
	case RTE_FLOW_FIELD_IPV4_TTL:
		info[idx] = (struct field_modify_info){1, 0,
					MLX5_MODI_OUT_IPV4_TTL};
		if (mask)
			mask[idx] = 0xff >> (8 - width);
		break;
	case RTE_FLOW_FIELD_IPV4_SRC:
		info[idx] = (struct field_modify_info){4, 0,
					MLX5_MODI_OUT_SIPV4};
		if (mask)
			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
						     (32 - width));
		break;
	case RTE_FLOW_FIELD_IPV4_DST:
		info[idx] = (struct field_modify_info){4, 0,
					MLX5_MODI_OUT_DIPV4};
		if (mask)
			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
						     (32 - width));
		break;
	case RTE_FLOW_FIELD_IPV6_DSCP:
		info[idx] = (struct field_modify_info){1, 0,
					MLX5_MODI_OUT_IP_DSCP};
		if (mask)
			mask[idx] = 0x3f >> (6 - width);
		break;
	case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
		info[idx] = (struct field_modify_info){1, 0,
					MLX5_MODI_OUT_IPV6_HOPLIMIT};
		if (mask)
			mask[idx] = 0xff >> (8 - width);
		break;
	case RTE_FLOW_FIELD_IPV6_SRC:
		if (mask) {
			if (data->offset < 32) {
				info[idx] = (struct field_modify_info){4, 12,
						MLX5_MODI_OUT_SIPV6_31_0};
				if (width < 32) {
					mask[3] =
						rte_cpu_to_be_32(0xffffffff >>
								 (32 - width));
					width = 0;
				} else {
					mask[3] = RTE_BE32(0xffffffff);
					width -= 32;
				}
				if (!width)
					break;
				++idx;
			}
			if (data->offset < 64) {
				info[idx] = (struct field_modify_info){4, 8,
						MLX5_MODI_OUT_SIPV6_63_32};
				if (width < 32) {
					mask[2] =
						rte_cpu_to_be_32(0xffffffff >>
								 (32 - width));
					width = 0;
				} else {
					mask[2] = RTE_BE32(0xffffffff);
					width -= 32;
				}
				if (!width)
					break;
				++idx;
			}
			if (data->offset < 96) {
				info[idx] = (struct field_modify_info){4, 4,
						MLX5_MODI_OUT_SIPV6_95_64};
				if (width < 32) {
					mask[1] =
						rte_cpu_to_be_32(0xffffffff >>
								 (32 - width));
					width = 0;
				} else {
					mask[1] = RTE_BE32(0xffffffff);
					width -= 32;
				}
				if (!width)
					break;
				++idx;
			}
			info[idx] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_SIPV6_127_96};
			mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
		} else {
			if (data->offset < 32)
				info[idx++] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_SIPV6_31_0};
			if (data->offset < 64)
				info[idx++] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_SIPV6_63_32};
			if (data->offset < 96)
				info[idx++] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_SIPV6_95_64};
			if (data->offset < 128)
				info[idx++] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_SIPV6_127_96};
		}
		break;
	case RTE_FLOW_FIELD_IPV6_DST:
		if (mask) {
			if (data->offset < 32) {
				info[idx] = (struct field_modify_info){4, 12,
						MLX5_MODI_OUT_DIPV6_31_0};
				if (width < 32) {
					mask[3] =
						rte_cpu_to_be_32(0xffffffff >>
								 (32 - width));
					width = 0;
				} else {
					mask[3] = RTE_BE32(0xffffffff);
					width -= 32;
				}
				if (!width)
					break;
				++idx;
			}
			if (data->offset < 64) {
				info[idx] = (struct field_modify_info){4, 8,
						MLX5_MODI_OUT_DIPV6_63_32};
				if (width < 32) {
					mask[2] =
						rte_cpu_to_be_32(0xffffffff >>
								 (32 - width));
					width = 0;
				} else {
					mask[2] = RTE_BE32(0xffffffff);
					width -= 32;
				}
				if (!width)
					break;
				++idx;
			}
			if (data->offset < 96) {
				info[idx] = (struct field_modify_info){4, 4,
						MLX5_MODI_OUT_DIPV6_95_64};
				if (width < 32) {
					mask[1] =
						rte_cpu_to_be_32(0xffffffff >>
								 (32 - width));
					width = 0;
				} else {
					mask[1] = RTE_BE32(0xffffffff);
					width -= 32;
				}
				if (!width)
					break;
				++idx;
			}
			info[idx] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_DIPV6_127_96};
			mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
		} else {
			if (data->offset < 32)
				info[idx++] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_DIPV6_31_0};
			if (data->offset < 64)
				info[idx++] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_DIPV6_63_32};
			if (data->offset < 96)
				info[idx++] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_DIPV6_95_64};
			if (data->offset < 128)
				info[idx++] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_DIPV6_127_96};
		}
		break;
	case RTE_FLOW_FIELD_TCP_PORT_SRC:
		info[idx] = (struct field_modify_info){2, 0,
					MLX5_MODI_OUT_TCP_SPORT};
		if (mask)
			mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
		break;
	case RTE_FLOW_FIELD_TCP_PORT_DST:
		info[idx] = (struct field_modify_info){2, 0,
					MLX5_MODI_OUT_TCP_DPORT};
		if (mask)
			mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
		break;
	case RTE_FLOW_FIELD_TCP_SEQ_NUM:
		info[idx] = (struct field_modify_info){4, 0,
					MLX5_MODI_OUT_TCP_SEQ_NUM};
		if (mask)
			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
						     (32 - width));
		break;
	case RTE_FLOW_FIELD_TCP_ACK_NUM:
		info[idx] = (struct field_modify_info){4, 0,
					MLX5_MODI_OUT_TCP_ACK_NUM};
		if (mask)
			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
						     (32 - width));
		break;
	case RTE_FLOW_FIELD_TCP_FLAGS:
		info[idx] = (struct field_modify_info){2, 0,
					MLX5_MODI_OUT_TCP_FLAGS};
		if (mask)
			mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
		break;
	case RTE_FLOW_FIELD_UDP_PORT_SRC:
		info[idx] = (struct field_modify_info){2, 0,
					MLX5_MODI_OUT_UDP_SPORT};
		if (mask)
			mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
		break;
	case RTE_FLOW_FIELD_UDP_PORT_DST:
		info[idx] = (struct field_modify_info){2, 0,
					MLX5_MODI_OUT_UDP_DPORT};
		if (mask)
			mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
		break;
	case RTE_FLOW_FIELD_VXLAN_VNI:
		/* not supported yet */
		break;
	case RTE_FLOW_FIELD_GENEVE_VNI:
		/* not supported yet */
		break;
	case RTE_FLOW_FIELD_GTP_TEID:
		info[idx] = (struct field_modify_info){4, 0,
					MLX5_MODI_GTP_TEID};
		if (mask)
			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
						     (32 - width));
		break;
	case RTE_FLOW_FIELD_TAG:
		{
			int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
						   data->level, error);
			if (reg < 0)
				return;
			MLX5_ASSERT(reg != REG_NON);
			MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
			info[idx] = (struct field_modify_info){4, 0,
						reg_to_field[reg]};
			if (mask)
				mask[idx] =
					rte_cpu_to_be_32(0xffffffff >>
							 (32 - width));
		}
		break;
	case RTE_FLOW_FIELD_MARK:
		{
			uint32_t mark_mask = priv->sh->dv_mark_mask;
			uint32_t mark_count = __builtin_popcount(mark_mask);
			int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
						       0, error);
			if (reg < 0)
				return;
			MLX5_ASSERT(reg != REG_NON);
			MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
			info[idx] = (struct field_modify_info){4, 0,
						reg_to_field[reg]};
			if (mask)
				mask[idx] = rte_cpu_to_be_32((mark_mask >>
					 (mark_count - width)) & mark_mask);
		}
		break;
	case RTE_FLOW_FIELD_META:
		{
			uint32_t meta_mask = priv->sh->dv_meta_mask;
			uint32_t meta_count = __builtin_popcount(meta_mask);
			uint32_t msk_c0 =
				rte_cpu_to_be_32(priv->sh->dv_regc0_mask);
			uint32_t shl_c0 = rte_bsf32(msk_c0);
			int reg = flow_dv_get_metadata_reg(dev, attr, error);
			if (reg < 0)
				return;
			MLX5_ASSERT(reg != REG_NON);
			MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
			if (reg == REG_C_0)
				*shift = shl_c0;
			info[idx] = (struct field_modify_info){4, 0,
						reg_to_field[reg]};
			if (mask)
				mask[idx] = rte_cpu_to_be_32((meta_mask >>
					(meta_count - width)) & meta_mask);
		}
		break;
	case RTE_FLOW_FIELD_POINTER:
	case RTE_FLOW_FIELD_VALUE:
	default:
		MLX5_ASSERT(false);
		break;
	}
}

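/*
 * Worked example of the MAC address split above: setting the whole 48-bit
 * destination MAC (width == 48, offset == 0) with a mask emits two entries,
 * a 2-byte MLX5_MODI_OUT_DMAC_15_0 chunk with mask[1] == 0xffff and a
 * 4-byte MLX5_MODI_OUT_DMAC_47_16 chunk with mask[0] == 0xffffffff, since
 * the hardware models the address as separate 16-bit and 32-bit fields.
 */
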
/**
 * Convert modify_field action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] attr
 *   Attributes of the flow that includes this action.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_field
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_attr *attr,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_modify_field *conf =
		(const struct rte_flow_action_modify_field *)(action->conf);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = NULL
	};
	struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
								{0, 0, 0} };
	struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
								{0, 0, 0} };
	uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
	uint32_t type;
	uint32_t shift = 0;

	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
	    conf->src.field == RTE_FLOW_FIELD_VALUE) {
		type = MLX5_MODIFICATION_TYPE_SET;
		/* For SET fill the destination field (field) first. */
		mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
						  conf->width, &shift, dev,
						  attr, error);
		item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
					(void *)(uintptr_t)conf->src.pvalue :
					(void *)(uintptr_t)&conf->src.value;
	} else {
		type = MLX5_MODIFICATION_TYPE_COPY;
		/* For COPY fill the destination field (dcopy) without mask. */
		mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
						  conf->width, &shift, dev,
						  attr, error);
		/* Then construct the source field (field) with mask. */
		mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
						  conf->width, &shift,
						  dev, attr, error);
	}
	item.mask = &mask;
	return flow_dv_convert_modify_action(&item,
			field, dcopy, resource, type, error);
}

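/*
 * Usage sketch from the application side (a sketch only; the struct layout
 * follows the rte_flow.h of this DPDK generation and the values are
 * assumed): copy the 24 low-order bits of the GTP TEID into TAG register
 * index 0. Because the source is a packet field, the COPY branch above
 * builds "dcopy" from the destination and "field"/"mask" from the source.
 *
 *     struct rte_flow_action_modify_field mf = {
 *             .operation = RTE_FLOW_MODIFY_SET,
 *             .dst = { .field = RTE_FLOW_FIELD_TAG, .level = 0 },
 *             .src = { .field = RTE_FLOW_FIELD_GTP_TEID },
 *             .width = 24,
 *     };
 *     struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
 *             .conf = &mf,
 *     };
 */
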
/**
 * Validate MARK item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of the flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_mark(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr __rte_unused,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	const struct rte_flow_item_mark *spec = item->spec;
	const struct rte_flow_item_mark *mask = item->mask;
	const struct rte_flow_item_mark nic_mask = {
		.id = priv->sh->dv_mark_mask,
	};
	int ret;

	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata feature"
					  " isn't enabled");
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't supported");
	if (!nic_mask.id)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't available");
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (ret < 0)
		return ret;
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &spec->id,
					  "mark id exceeds the limit");
	if (!mask)
		mask = &nic_mask;
	if (!mask->id)
		return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					"mask cannot be zero");

	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_mark),
					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	return 0;
}

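/*
 * Usage sketch (assumes dv_xmeta_en is not MLX5_XMETA_MODE_LEGACY so the
 * checks above pass): match packets whose mark id was set to 0xcafe by a
 * MARK action in another flow rule.
 *
 *     struct rte_flow_item_mark mark_spec = { .id = 0xcafe };
 *     struct rte_flow_item items[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_MARK, .spec = &mark_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */
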
/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of the flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	const struct rte_flow_item_meta *spec = item->spec;
	const struct rte_flow_item_meta *mask = item->mask;
	struct rte_flow_item_meta nic_mask = {
		.data = UINT32_MAX
	};
	int reg;
	int ret;

	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
		if (!mlx5_flow_ext_mreg_supported(dev))
			return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't supported");
		reg = flow_dv_get_metadata_reg(dev, attr, error);
		if (reg < 0)
			return reg;
		if (reg == REG_NON)
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"unavailable extended metadata register");
		if (reg == REG_B)
			return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "match on reg_b "
					  "isn't supported");
		if (reg != REG_A)
			nic_mask.data = priv->sh->dv_meta_mask;
	} else {
		if (attr->transfer)
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"extended metadata feature "
					"should be enabled when "
					"meta item is requested "
					"with e-switch mode");
		if (attr->ingress)
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"match on metadata for ingress "
					"is not supported in legacy "
					"metadata mode");
	}
	if (!mask)
		mask = &rte_flow_item_meta_mask;
	if (!mask->data)
		return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					"mask cannot be zero");

	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_meta),
					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	return ret;
}

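/*
 * Usage sketch (extended metadata mode assumed): match on metadata written
 * by the RTE_FLOW_ACTION_TYPE_SET_META action, masking to the low 16 bits.
 *
 *     struct rte_flow_item_meta meta_spec = { .data = 0x1234 };
 *     struct rte_flow_item_meta meta_mask = { .data = 0xffff };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_META,
 *             .spec = &meta_spec,
 *             .mask = &meta_mask,
 *     };
 */
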
/**
 * Validate TAG item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of the flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_tag(struct rte_eth_dev *dev,
			  const struct rte_flow_item *item,
			  const struct rte_flow_attr *attr __rte_unused,
			  struct rte_flow_error *error)
{
	const struct rte_flow_item_tag *spec = item->spec;
	const struct rte_flow_item_tag *mask = item->mask;
	const struct rte_flow_item_tag nic_mask = {
		.data = RTE_BE32(UINT32_MAX),
		.index = 0xff,
	};
	int ret;

	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't supported");
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (!mask)
		mask = &rte_flow_item_tag_mask;
	if (!mask->data)
		return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					"mask cannot be zero");

	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_tag),
					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	if (mask->index != 0xff)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					  "partial mask for tag index"
					  " is not supported");
	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret != REG_NON);
	return 0;
}

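/*
 * Usage sketch (hypothetical values): match application TAG register
 * index 1. The index mask must be full (0xff) as enforced above, while the
 * data mask may be partial.
 *
 *     struct rte_flow_item_tag tag_spec = { .data = 7, .index = 1 };
 *     struct rte_flow_item_tag tag_mask = { .data = 0xff, .index = 0xff };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_TAG,
 *             .spec = &tag_spec,
 *             .mask = &tag_mask,
 *     };
 */
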
/**
 * Validate port_id item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of the flow that includes this item.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
			      const struct rte_flow_item *item,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_port_id *spec = item->spec;
	const struct rte_flow_item_port_id *mask = item->mask;
	const struct rte_flow_item_port_id switch_mask = {
			.id = 0xffffffff,
	};
	struct mlx5_priv *esw_priv;
	struct mlx5_priv *dev_priv;
	int ret;

	if (!attr->transfer)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL,
					  "match on port id is valid only"
					  " when transfer flag is enabled");
	if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple source ports are not"
					  " supported");
	if (!mask)
		mask = &switch_mask;
	if (mask->id != 0xffffffff)
		return rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					   mask,
					   "no support for partial mask on"
					   " \"id\" field");
	ret = mlx5_flow_item_acceptable
				(item, (const uint8_t *)mask,
				 (const uint8_t *)&rte_flow_item_port_id_mask,
				 sizeof(struct rte_flow_item_port_id),
				 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret)
		return ret;
	if (!spec)
		return 0;
	if (spec->id == MLX5_PORT_ESW_MGR)
		return 0;
	esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
	if (!esw_priv)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "failed to obtain E-Switch info for"
					  " port");
	dev_priv = mlx5_dev_to_eswitch_info(dev);
	if (!dev_priv)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "failed to obtain E-Switch info");
	if (esw_priv->domain_id != dev_priv->domain_id)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "cannot match on a port from a"
					  " different E-Switch");
	return 0;
}

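/*
 * Usage sketch (transfer rules only, as enforced above): match traffic
 * entering the E-Switch from the vport backing DPDK port 1.
 *
 *     struct rte_flow_item_port_id pid_spec = { .id = 1 };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *             .spec = &pid_spec,
 *     };
 */
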
/**
 * Validate VLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Ethernet device flow is being created on.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_vlan(const struct rte_flow_item *item,
			   uint64_t item_flags,
			   struct rte_eth_dev *dev,
			   struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *mask = item->mask;
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(UINT16_MAX),
		.inner_type = RTE_BE16(UINT16_MAX),
		.has_more_vlan = 1,
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;
	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
					MLX5_FLOW_LAYER_INNER_L4) :
				       (MLX5_FLOW_LAYER_OUTER_L3 |
					MLX5_FLOW_LAYER_OUTER_L4);
	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					MLX5_FLOW_LAYER_OUTER_VLAN;

	if (item_flags & vlanm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple VLAN layers not supported");
	else if ((item_flags & l34m) != 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VLAN cannot follow L3/L4 layer");
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_vlan),
					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret)
		return ret;
	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
		struct mlx5_priv *priv = dev->data->dev_private;

		if (priv->vmwa_context) {
			/*
			 * A non-NULL context means we run in a virtual
			 * machine with SR-IOV enabled and must create a
			 * VLAN interface so the hypervisor sets up the
			 * E-Switch vport context correctly. Since we avoid
			 * creating multiple VLAN interfaces, a VLAN tag
			 * mask cannot be supported.
			 */
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VLAN tag mask is not"
						  " supported in virtual"
						  " environment");
		}
	}
	return 0;
}

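/*
 * Usage sketch: match VLAN ID 100 only. A TCI mask of 0x0fff leaves the
 * PCP/DEI bits unmatched and also satisfies the virtual environment check
 * above, which rejects any other partial TCI mask when a VM workaround
 * context exists.
 *
 *     struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(100) };
 *     struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0x0fff) };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *             .spec = &vlan_spec,
 *             .mask = &vlan_mask,
 *     };
 */
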
/*
 * GTP flags are contained in 1 byte of the format:
 * -------------------------------------------
 * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
 * |-----------------------------------------|
 * | value | Version | PT | Res | E | S | PN |
 * -------------------------------------------
 *
 * Matching is supported only for GTP flags E, S, PN.
 */
#define MLX5_GTP_FLAGS_MASK	0x07

/**
 * Validate GTP item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
			  const struct rte_flow_item *item,
			  uint64_t item_flags,
			  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_gtp *spec = item->spec;
	const struct rte_flow_item_gtp *mask = item->mask;
	const struct rte_flow_item_gtp nic_mask = {
		.v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
		.msg_type = 0xff,
		.teid = RTE_BE32(0xffffffff),
	};

	if (!priv->config.hca_attr.tunnel_stateless_gtp)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "GTP support is not enabled");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
					  " supported");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "no outer UDP layer found");
	if (!mask)
		mask = &rte_flow_item_gtp_mask;
	if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Match is supported for GTP"
					  " flags only");
	return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					 (const uint8_t *)&nic_mask,
					 sizeof(struct rte_flow_item_gtp),
					 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
}

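/*
 * Usage sketch (the device must report the tunnel_stateless_gtp capability
 * and an outer UDP item must precede this one; values are assumed): match
 * GTP-U G-PDU packets (message type 255) with TEID 0x1234.
 *
 *     struct rte_flow_item_gtp gtp_spec = {
 *             .msg_type = 255,
 *             .teid = RTE_BE32(0x1234),
 *     };
 *     struct rte_flow_item_gtp gtp_mask = {
 *             .msg_type = 0xff,
 *             .teid = RTE_BE32(0xffffffff),
 *     };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_GTP,
 *             .spec = &gtp_spec,
 *             .mask = &gtp_mask,
 *     };
 */
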
/**
 * Validate GTP PSC item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] gtp_item
 *   Previous GTP item specification.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
			      uint64_t last_item,
			      const struct rte_flow_item *gtp_item,
			      const struct rte_flow_attr *attr,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_gtp *gtp_spec;
	const struct rte_flow_item_gtp *gtp_mask;
	const struct rte_flow_item_gtp_psc *mask;
	const struct rte_flow_item_gtp_psc nic_mask = {
		.hdr.type = 0xF,
		.hdr.qfi = 0x3F,
	};

	if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "GTP PSC item must be preceded by a GTP item");
	gtp_spec = gtp_item->spec;
	gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
	/* GTP spec is present and the E flag is requested to match zero. */
	if (gtp_spec &&
		(gtp_mask->v_pt_rsv_flags &
		~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "GTP E flag must be 1 to match GTP PSC");
	/* Check the flow is not created in group zero. */
	if (!attr->transfer && !attr->group)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "GTP PSC is not supported for group 0");
	/* GTP PSC spec is optional; nothing more to validate without it. */
	if (!item->spec)
		return 0;
	mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
	return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					 (const uint8_t *)&nic_mask,
					 sizeof(struct rte_flow_item_gtp_psc),
					 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
}

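/*
 * Usage sketch (field names follow the rte_flow_item_gtp_psc layout used in
 * nic_mask above; values are assumed): match QFI 9 in the PDU session
 * container. The preceding GTP item must request the E flag, e.g. with
 * v_pt_rsv_flags == 0x04 in both spec and mask, and the rule must target a
 * non-zero group or a transfer domain.
 *
 *     struct rte_flow_item_gtp_psc psc_spec = { .hdr.qfi = 9 };
 *     struct rte_flow_item_gtp_psc psc_mask = { .hdr.qfi = 0x3f };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_GTP_PSC,
 *             .spec = &psc_spec,
 *             .mask = &psc_mask,
 *     };
 */
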
2407 /**
2408  * Validate IPV4 item.
2409  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2410  * add specific validation of fragment_offset field,
2411  *
2412  * @param[in] item
2413  *   Item specification.
2414  * @param[in] item_flags
2415  *   Bit-fields that holds the items detected until now.
2416  * @param[out] error
2417  *   Pointer to error structure.
2418  *
2419  * @return
2420  *   0 on success, a negative errno value otherwise and rte_errno is set.
2421  */
2422 static int
2423 flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
2424                            const struct rte_flow_item *item,
2425                            uint64_t item_flags, uint64_t last_item,
2426                            uint16_t ether_type, struct rte_flow_error *error)
2427 {
2428         int ret;
2429         struct mlx5_priv *priv = dev->data->dev_private;
2430         const struct rte_flow_item_ipv4 *spec = item->spec;
2431         const struct rte_flow_item_ipv4 *last = item->last;
2432         const struct rte_flow_item_ipv4 *mask = item->mask;
2433         rte_be16_t fragment_offset_spec = 0;
2434         rte_be16_t fragment_offset_last = 0;
2435         struct rte_flow_item_ipv4 nic_ipv4_mask = {
2436                 .hdr = {
2437                         .src_addr = RTE_BE32(0xffffffff),
2438                         .dst_addr = RTE_BE32(0xffffffff),
2439                         .type_of_service = 0xff,
2440                         .fragment_offset = RTE_BE16(0xffff),
2441                         .next_proto_id = 0xff,
2442                         .time_to_live = 0xff,
2443                 },
2444         };
2445
2446         if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
2447                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2448                 bool ihl_cap = !tunnel ? priv->config.hca_attr.outer_ipv4_ihl :
2449                                priv->config.hca_attr.inner_ipv4_ihl;
2450                 if (!ihl_cap)
2451                         return rte_flow_error_set(error, ENOTSUP,
2452                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2453                                                   item,
2454                                                   "IPV4 ihl offload not supported");
2455                 nic_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
2456         }
2457         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2458                                            ether_type, &nic_ipv4_mask,
2459                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2460         if (ret < 0)
2461                 return ret;
2462         if (spec && mask)
2463                 fragment_offset_spec = spec->hdr.fragment_offset &
2464                                        mask->hdr.fragment_offset;
2465         if (!fragment_offset_spec)
2466                 return 0;
2467         /*
2468          * spec and mask are valid, enforce using full mask to make sure the
2469          * complete value is used correctly.
2470          */
2471         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2472                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2473                 return rte_flow_error_set(error, EINVAL,
2474                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2475                                           item, "must use full mask for"
2476                                           " fragment_offset");
2477         /*
2478          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2479          * indicating this is 1st fragment of fragmented packet.
2480          * This is not yet supported in MLX5, return appropriate error message.
2481          */
2482         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2483                 return rte_flow_error_set(error, ENOTSUP,
2484                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2485                                           "match on first fragment not "
2486                                           "supported");
2487         if (fragment_offset_spec && !last)
2488                 return rte_flow_error_set(error, ENOTSUP,
2489                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2490                                           "specified value not supported");
2491         /* spec and last are valid, validate the specified range. */
2492         fragment_offset_last = last->hdr.fragment_offset &
2493                                mask->hdr.fragment_offset;
2494         /*
2495          * Match on fragment_offset spec 0x2001 and last 0x3fff
2496          * means MF is 1 and frag-offset is > 0.
2497          * This packet is the 2nd or a later fragment, excluding the last.
2498          * This is not yet supported in MLX5, return appropriate
2499          * error message.
2500          */
2501         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2502             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2503                 return rte_flow_error_set(error, ENOTSUP,
2504                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2505                                           last, "match on following "
2506                                           "fragments not supported");
2507         /*
2508          * Match on fragment_offset spec 0x0001 and last 0x1fff
2509          * means MF is 0 and frag-offset is > 0.
2510          * This packet is the last fragment of a fragmented packet.
2511          * This is not yet supported in MLX5, return appropriate
2512          * error message.
2513          */
2514         if (fragment_offset_spec == RTE_BE16(1) &&
2515             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2516                 return rte_flow_error_set(error, ENOTSUP,
2517                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2518                                           last, "match on last "
2519                                           "fragment not supported");
2520         /*
2521          * Match on fragment_offset spec 0x0001 and last 0x3fff
2522          * means MF and/or frag-offset is not 0.
2523          * This is a fragmented packet.
2524          * Other range values are invalid and rejected.
2525          */
2526         if (!(fragment_offset_spec == RTE_BE16(1) &&
2527               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2528                 return rte_flow_error_set(error, ENOTSUP,
2529                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2530                                           "specified range not supported");
2531         return 0;
2532 }
2533
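/*
 * Illustrative sketch, not referenced by the driver: how the
 * fragment_offset spec/last pairs handled above map onto fragment
 * classes. Values are host-endian here for readability; the flow
 * item carries them big-endian. The helper name is hypothetical.
 */
static inline const char *
flow_dv_example_ipv4_frag_class(uint16_t spec, uint16_t last)
{
        if (spec == RTE_IPV4_HDR_MF_FLAG)
                return "first fragment (MF=1, offset=0): rejected above";
        if (spec == RTE_IPV4_HDR_MF_FLAG + 1 &&
            last == MLX5_IPV4_FRAG_OFFSET_MASK)
                return "2nd and onward fragments (MF=1, offset>0): rejected";
        if (spec == 1 && last == RTE_IPV4_HDR_OFFSET_MASK)
                return "last fragment (MF=0, offset>0): rejected";
        if (spec == 1 && last == MLX5_IPV4_FRAG_OFFSET_MASK)
                return "any fragment (MF or offset non-zero): accepted";
        return "unsupported range";
}
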
2534 /**
2535  * Validate IPV6 fragment extension item.
2536  *
2537  * @param[in] item
2538  *   Item specification.
2539  * @param[in] item_flags
2540  *   Bit-fields that hold the items detected until now.
2541  * @param[out] error
2542  *   Pointer to error structure.
2543  *
2544  * @return
2545  *   0 on success, a negative errno value otherwise and rte_errno is set.
2546  */
2547 static int
2548 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2549                                     uint64_t item_flags,
2550                                     struct rte_flow_error *error)
2551 {
2552         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2553         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2554         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2555         rte_be16_t frag_data_spec = 0;
2556         rte_be16_t frag_data_last = 0;
2557         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2558         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2559                                       MLX5_FLOW_LAYER_OUTER_L4;
2560         int ret = 0;
2561         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2562                 .hdr = {
2563                         .next_header = 0xff,
2564                         .frag_data = RTE_BE16(0xffff),
2565                 },
2566         };
2567
2568         if (item_flags & l4m)
2569                 return rte_flow_error_set(error, EINVAL,
2570                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2571                                           "ipv6 fragment extension item cannot "
2572                                           "follow L4 item.");
2573         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2574             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2575                 return rte_flow_error_set(error, EINVAL,
2576                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2577                                           "ipv6 fragment extension item must "
2578                                           "follow ipv6 item");
2579         if (spec && mask)
2580                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2581         if (!frag_data_spec)
2582                 return 0;
2583         /*
2584          * spec and mask are valid, enforce using full mask to make sure the
2585          * complete value is used correctly.
2586          */
2587         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2588                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2589                 return rte_flow_error_set(error, EINVAL,
2590                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2591                                           item, "must use full mask for"
2592                                           " frag_data");
2593         /*
2594          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2595          * This is the first fragment of a fragmented packet.
2596          */
2597         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2598                 return rte_flow_error_set(error, ENOTSUP,
2599                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2600                                           "match on first fragment not "
2601                                           "supported");
2602         if (frag_data_spec && !last)
2603                 return rte_flow_error_set(error, EINVAL,
2604                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2605                                           "specified value not supported");
2606         ret = mlx5_flow_item_acceptable
2607                                 (item, (const uint8_t *)mask,
2608                                  (const uint8_t *)&nic_mask,
2609                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2610                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2611         if (ret)
2612                 return ret;
2613         /* spec and last are valid, validate the specified range. */
2614         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2615         /*
2616          * Match on frag_data spec 0x0009 and last 0xfff9
2617          * means M is 1 and frag-offset is > 0.
2618          * This packet is the 2nd or a later fragment, excluding the last.
2619          * This is not yet supported in MLX5, return appropriate
2620          * error message.
2621          */
2622         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2623                                        RTE_IPV6_EHDR_MF_MASK) &&
2624             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2625                 return rte_flow_error_set(error, ENOTSUP,
2626                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2627                                           last, "match on following "
2628                                           "fragments not supported");
2629         /*
2630          * Match on frag_data spec 0x0008 and last 0xfff8
2631          * means M is 0 and frag-offset is > 0.
2632          * This packet is the last fragment of a fragmented packet.
2633          * This is not yet supported in MLX5, return appropriate
2634          * error message.
2635          */
2636         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2637             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2638                 return rte_flow_error_set(error, ENOTSUP,
2639                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2640                                           last, "match on last "
2641                                           "fragment not supported");
2642         /* Other range values are invalid and rejected. */
2643         return rte_flow_error_set(error, EINVAL,
2644                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2645                                   "specified range not supported");
2646 }
2647
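/*
 * Illustrative sketch, not referenced by the driver: frag_data packs
 * the fragment offset in bits 15:3 and the M (more-fragments) flag in
 * bit 0, which is what the spec/last encodings above rely on. The
 * helper name is hypothetical; the value is host-endian here.
 */
static inline void
flow_dv_example_ipv6_frag_data_decode(uint16_t frag_data,
                                      uint16_t *offset, bool *mf)
{
        *offset = (frag_data & RTE_IPV6_EHDR_FO_MASK) >>
                  RTE_IPV6_EHDR_FO_SHIFT;
        *mf = !!(frag_data & RTE_IPV6_EHDR_MF_MASK);
}
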
2648 /**
2649  * Validate ASO CT item.
2650  *
2651  * @param[in] dev
2652  *   Pointer to the rte_eth_dev structure.
2653  * @param[in] item
2654  *   Item specification.
2655  * @param[in, out] item_flags
2656  *   Pointer to bit-fields that hold the items detected until now.
2657  * @param[out] error
2658  *   Pointer to error structure.
2659  *
2660  * @return
2661  *   0 on success, a negative errno value otherwise and rte_errno is set.
2662  */
2663 static int
2664 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2665                              const struct rte_flow_item *item,
2666                              uint64_t *item_flags,
2667                              struct rte_flow_error *error)
2668 {
2669         const struct rte_flow_item_conntrack *spec = item->spec;
2670         const struct rte_flow_item_conntrack *mask = item->mask;
2671         uint32_t flags;
2672
2673         RTE_SET_USED(dev);
2674         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2675                 return rte_flow_error_set(error, EINVAL,
2676                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2677                                           "Only one CT is supported");
2678         if (!spec)
2679                 return rte_flow_error_set(error, EINVAL,
2680                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2681                                           "CT item spec cannot be NULL");
2682         if (!mask)
2683                 mask = &rte_flow_item_conntrack_mask;
2684         flags = spec->flags & mask->flags;
2681         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2682             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2683              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2684              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2685                 return rte_flow_error_set(error, EINVAL,
2686                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2687                                           "Conflict status bits");
2688         /* State change also needs to be considered. */
2689         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2690         return 0;
2691 }
2692
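/*
 * Illustrative sketch, not referenced by the driver: a CONNTRACK item
 * the validator above accepts. VALID must not be combined with
 * INVALID, BAD or DISABLED in the masked flags. The helper name is
 * hypothetical.
 */
static inline void
flow_dv_example_conntrack_item(struct rte_flow_item *item,
                               struct rte_flow_item_conntrack *spec)
{
        spec->flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID;
        item->type = RTE_FLOW_ITEM_TYPE_CONNTRACK;
        item->spec = spec;
        item->last = NULL;
        item->mask = &rte_flow_item_conntrack_mask; /* default mask */
}
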
2693 /**
2694  * Validate the pop VLAN action.
2695  *
2696  * @param[in] dev
2697  *   Pointer to the rte_eth_dev structure.
2698  * @param[in] action_flags
2699  *   Holds the actions detected until now.
2700  * @param[in] action
2701  *   Pointer to the pop vlan action.
2702  * @param[in] item_flags
2703  *   The items found in this flow rule.
2704  * @param[in] attr
2705  *   Pointer to flow attributes.
2706  * @param[out] error
2707  *   Pointer to error structure.
2708  *
2709  * @return
2710  *   0 on success, a negative errno value otherwise and rte_errno is set.
2711  */
2712 static int
2713 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2714                                  uint64_t action_flags,
2715                                  const struct rte_flow_action *action,
2716                                  uint64_t item_flags,
2717                                  const struct rte_flow_attr *attr,
2718                                  struct rte_flow_error *error)
2719 {
2720         const struct mlx5_priv *priv = dev->data->dev_private;
2721         struct mlx5_dev_ctx_shared *sh = priv->sh;
2722         bool direction_error = false;
2723
2724         if (!priv->sh->pop_vlan_action)
2725                 return rte_flow_error_set(error, ENOTSUP,
2726                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2727                                           NULL,
2728                                           "pop vlan action is not supported");
2729         /* Pop VLAN is not supported in egress except for CX6 FDB mode. */
2730         if (attr->transfer) {
2731                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2732                 bool is_cx5 = sh->steering_format_version ==
2733                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2734
2735                 if (fdb_tx && is_cx5)
2736                         direction_error = true;
2737         } else if (attr->egress) {
2738                 direction_error = true;
2739         }
2740         if (direction_error)
2741                 return rte_flow_error_set(error, ENOTSUP,
2742                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2743                                           NULL,
2744                                           "pop vlan action not supported for egress");
2745         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2746                 return rte_flow_error_set(error, ENOTSUP,
2747                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2748                                           "no support for multiple VLAN "
2749                                           "actions");
2750         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2751         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2752             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2753                 return rte_flow_error_set(error, ENOTSUP,
2754                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2755                                           NULL,
2756                                           "cannot pop vlan after decap without "
2757                                           "match on inner vlan in the flow");
2758         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2759         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2760             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2761                 return rte_flow_error_set(error, ENOTSUP,
2762                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2763                                           NULL,
2764                                           "cannot pop vlan without a "
2765                                           "match on (outer) vlan in the flow");
2766         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2767                 return rte_flow_error_set(error, EINVAL,
2768                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2769                                           "wrong action order, port_id should "
2770                                           "be after pop VLAN action");
2771         if (!attr->transfer && priv->representor)
2772                 return rte_flow_error_set(error, ENOTSUP,
2773                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2774                                           "pop vlan action for VF representor "
2775                                           "not supported on NIC table");
2776         return 0;
2777 }
2778
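/*
 * Illustrative usage sketch from the application side, not part of
 * the driver: an ingress rule that satisfies the checks above - the
 * pattern is expected to contain an outer VLAN item and OF_POP_VLAN
 * precedes the fate action. The helper name is hypothetical.
 */
static inline int
flow_dv_example_pop_vlan_rule(uint16_t port_id,
                              const struct rte_flow_item pattern[],
                              struct rte_flow_error *error)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}
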
2779 /**
2780  * Get VLAN default info from the VLAN match info in the item list.
2781  *
2782  * @param[in] items
2783  *   The list of item specifications.
2784  * @param[out] vlan
2785  *   Pointer to the VLAN info to fill.
2786  */
2790 static void
2791 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2792                                   struct rte_vlan_hdr *vlan)
2793 {
2794         const struct rte_flow_item_vlan nic_mask = {
2795                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2796                                 MLX5DV_FLOW_VLAN_VID_MASK),
2797                 .inner_type = RTE_BE16(0xffff),
2798         };
2799
2800         if (items == NULL)
2801                 return;
2802         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2803                 int type = items->type;
2804
2805                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2806                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2807                         break;
2808         }
2809         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2810                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2811                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2812
2813                 /* If VLAN item in pattern doesn't contain data, return here. */
2814                 if (!vlan_v)
2815                         return;
2816                 if (!vlan_m)
2817                         vlan_m = &nic_mask;
2818                 /* Only full match values are accepted */
2819                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2820                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2821                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2822                         vlan->vlan_tci |=
2823                                 rte_be_to_cpu_16(vlan_v->tci &
2824                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2825                 }
2826                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2827                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2828                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2829                         vlan->vlan_tci |=
2830                                 rte_be_to_cpu_16(vlan_v->tci &
2831                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2832                 }
2833                 if (vlan_m->inner_type == nic_mask.inner_type)
2834                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2835                                                            vlan_m->inner_type);
2836         }
2837 }
2838
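/*
 * Illustrative sketch, not referenced by the driver: the TCI layout
 * consumed above - PCP in bits 15:13, DEI in bit 12, VID in bits
 * 11:0 - decoded with the masks defined at the top of this file.
 * The helper name is hypothetical.
 */
static inline void
flow_dv_example_tci_decode(uint16_t tci, uint8_t *pcp, uint16_t *vid)
{
        *pcp = (tci & MLX5DV_FLOW_VLAN_PCP_MASK) >>
               MLX5DV_FLOW_VLAN_PCP_SHIFT;
        *vid = tci & MLX5DV_FLOW_VLAN_VID_MASK;
}
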
2839 /**
2840  * Validate the push VLAN action.
2841  *
2842  * @param[in] dev
2843  *   Pointer to the rte_eth_dev structure.
2844  * @param[in] action_flags
2845  *   Holds the actions detected until now.
2846  * @param[in] vlan_m
2847  *   Pointer to the VLAN item mask, or NULL if the pattern has none.
2848  * @param[in] action
2849  *   Pointer to the action structure.
2850  * @param[in] attr
2851  *   Pointer to flow attributes
2852  * @param[out] error
2853  *   Pointer to error structure.
2854  *
2855  * @return
2856  *   0 on success, a negative errno value otherwise and rte_errno is set.
2857  */
2858 static int
2859 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2860                                   uint64_t action_flags,
2861                                   const struct rte_flow_item_vlan *vlan_m,
2862                                   const struct rte_flow_action *action,
2863                                   const struct rte_flow_attr *attr,
2864                                   struct rte_flow_error *error)
2865 {
2866         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2867         const struct mlx5_priv *priv = dev->data->dev_private;
2868         struct mlx5_dev_ctx_shared *sh = priv->sh;
2869         bool direction_error = false;
2870
2871         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2872             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2873                 return rte_flow_error_set(error, EINVAL,
2874                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2875                                           "invalid vlan ethertype");
2876         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2877                 return rte_flow_error_set(error, EINVAL,
2878                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2879                                           "wrong action order, port_id should "
2880                                           "be after push VLAN");
2881         /* Push VLAN is not supported in ingress except for CX6 FDB mode. */
2882         if (attr->transfer) {
2883                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2884                 bool is_cx5 = sh->steering_format_version ==
2885                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2886
2887                 if (!fdb_tx && is_cx5)
2888                         direction_error = true;
2889         } else if (attr->ingress) {
2890                 direction_error = true;
2891         }
2892         if (direction_error)
2893                 return rte_flow_error_set(error, ENOTSUP,
2894                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2895                                           NULL,
2896                                           "push vlan action not supported for ingress");
2897         if (!attr->transfer && priv->representor)
2898                 return rte_flow_error_set(error, ENOTSUP,
2899                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2900                                           "push vlan action for VF representor "
2901                                           "not supported on NIC table");
2902         if (vlan_m &&
2903             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2904             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2905                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2906             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2907             !(mlx5_flow_find_action
2908                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2909                 return rte_flow_error_set(error, EINVAL,
2910                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2911                                           "not full match mask on VLAN PCP and "
2912                                           "there is no of_set_vlan_pcp action, "
2913                                           "push VLAN action cannot figure out "
2914                                           "PCP value");
2915         if (vlan_m &&
2916             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2917             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2918                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2919             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2920             !(mlx5_flow_find_action
2921                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2922                 return rte_flow_error_set(error, EINVAL,
2923                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2924                                           "not full match mask on VLAN VID and "
2925                                           "there is no of_set_vlan_vid action, "
2926                                           "push VLAN action cannot figure out "
2927                                           "VID value");
2929         return 0;
2930 }
2931
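/*
 * Illustrative usage sketch from the application side, not part of
 * the driver: a push VLAN sequence that always passes the PCP/VID
 * checks above because OF_SET_VLAN_PCP and OF_SET_VLAN_VID provide
 * the values explicitly. The helper name is hypothetical.
 */
static inline void
flow_dv_example_push_vlan_actions(struct rte_flow_action actions[4])
{
        static const struct rte_flow_action_of_push_vlan push = {
                .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
        };
        static const struct rte_flow_action_of_set_vlan_pcp pcp = {
                .vlan_pcp = 3,
        };
        static const struct rte_flow_action_of_set_vlan_vid vid = {
                .vlan_vid = RTE_BE16(100),
        };

        actions[0] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push };
        actions[1] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp };
        actions[2] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid };
        actions[3] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_END };
}
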
2932 /**
2933  * Validate the set VLAN PCP.
2934  *
2935  * @param[in] action_flags
2936  *   Holds the actions detected until now.
2937  * @param[in] actions
2938  *   Pointer to the list of actions remaining in the flow rule.
2939  * @param[out] error
2940  *   Pointer to error structure.
2941  *
2942  * @return
2943  *   0 on success, a negative errno value otherwise and rte_errno is set.
2944  */
2945 static int
2946 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2947                                      const struct rte_flow_action actions[],
2948                                      struct rte_flow_error *error)
2949 {
2950         const struct rte_flow_action *action = actions;
2951         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2952
2953         if (conf->vlan_pcp > 7)
2954                 return rte_flow_error_set(error, EINVAL,
2955                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2956                                           "VLAN PCP value is too big");
2957         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2958                 return rte_flow_error_set(error, ENOTSUP,
2959                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2960                                           "set VLAN PCP action must follow "
2961                                           "the push VLAN action");
2962         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2963                 return rte_flow_error_set(error, ENOTSUP,
2964                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2965                                           "Multiple VLAN PCP modifications are "
2966                                           "not supported");
2967         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2968                 return rte_flow_error_set(error, EINVAL,
2969                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2970                                           "wrong action order, port_id should "
2971                                           "be after set VLAN PCP");
2972         return 0;
2973 }
2974
2975 /**
2976  * Validate the set VLAN VID.
2977  *
2978  * @param[in] item_flags
2979  *   Holds the items detected in this rule.
2980  * @param[in] action_flags
2981  *   Holds the actions detected until now.
2982  * @param[in] actions
2983  *   Pointer to the list of actions remaining in the flow rule.
2984  * @param[out] error
2985  *   Pointer to error structure.
2986  *
2987  * @return
2988  *   0 on success, a negative errno value otherwise and rte_errno is set.
2989  */
2990 static int
2991 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2992                                      uint64_t action_flags,
2993                                      const struct rte_flow_action actions[],
2994                                      struct rte_flow_error *error)
2995 {
2996         const struct rte_flow_action *action = actions;
2997         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2998
2999         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
3000                 return rte_flow_error_set(error, EINVAL,
3001                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3002                                           "VLAN VID value is too big");
3003         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3004             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3005                 return rte_flow_error_set(error, ENOTSUP,
3006                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3007                                           "set VLAN VID action must follow push"
3008                                           " VLAN action or match on VLAN item");
3009         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3010                 return rte_flow_error_set(error, ENOTSUP,
3011                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3012                                           "Multiple VLAN VID modifications are "
3013                                           "not supported");
3014         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3015                 return rte_flow_error_set(error, EINVAL,
3016                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3017                                           "wrong action order, port_id should "
3018                                           "be after set VLAN VID");
3019         return 0;
3020 }
3021
3022 /**
3023  * Validate the FLAG action.
3024  *
3025  * @param[in] dev
3026  *   Pointer to the rte_eth_dev structure.
3027  * @param[in] action_flags
3028  *   Holds the actions detected until now.
3029  * @param[in] attr
3030  *   Pointer to flow attributes
3031  * @param[out] error
3032  *   Pointer to error structure.
3033  *
3034  * @return
3035  *   0 on success, a negative errno value otherwise and rte_errno is set.
3036  */
3037 static int
3038 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3039                              uint64_t action_flags,
3040                              const struct rte_flow_attr *attr,
3041                              struct rte_flow_error *error)
3042 {
3043         struct mlx5_priv *priv = dev->data->dev_private;
3044         struct mlx5_dev_config *config = &priv->config;
3045         int ret;
3046
3047         /* Fall back if no extended metadata register support. */
3048         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3049                 return mlx5_flow_validate_action_flag(action_flags, attr,
3050                                                       error);
3051         /* Extensive metadata mode requires registers. */
3052         if (!mlx5_flow_ext_mreg_supported(dev))
3053                 return rte_flow_error_set(error, ENOTSUP,
3054                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3055                                           "no metadata registers "
3056                                           "to support flag action");
3057         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3058                 return rte_flow_error_set(error, ENOTSUP,
3059                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3060                                           "extended metadata register"
3061                                           " isn't available");
3062         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3063         if (ret < 0)
3064                 return ret;
3065         MLX5_ASSERT(ret > 0);
3066         if (action_flags & MLX5_FLOW_ACTION_MARK)
3067                 return rte_flow_error_set(error, EINVAL,
3068                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3069                                           "can't mark and flag in same flow");
3070         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3071                 return rte_flow_error_set(error, EINVAL,
3072                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3073                                           "can't have 2 flag"
3074                                           " actions in same flow");
3075         return 0;
3076 }
3077
3078 /**
3079  * Validate MARK action.
3080  *
3081  * @param[in] dev
3082  *   Pointer to the rte_eth_dev structure.
3083  * @param[in] action
3084  *   Pointer to action.
3085  * @param[in] action_flags
3086  *   Holds the actions detected until now.
3087  * @param[in] attr
3088  *   Pointer to flow attributes
3089  * @param[out] error
3090  *   Pointer to error structure.
3091  *
3092  * @return
3093  *   0 on success, a negative errno value otherwise and rte_errno is set.
3094  */
3095 static int
3096 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3097                              const struct rte_flow_action *action,
3098                              uint64_t action_flags,
3099                              const struct rte_flow_attr *attr,
3100                              struct rte_flow_error *error)
3101 {
3102         struct mlx5_priv *priv = dev->data->dev_private;
3103         struct mlx5_dev_config *config = &priv->config;
3104         const struct rte_flow_action_mark *mark = action->conf;
3105         int ret;
3106
3107         if (is_tunnel_offload_active(dev))
3108                 return rte_flow_error_set(error, ENOTSUP,
3109                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3110                                           "no mark action "
3111                                           "if tunnel offload active");
3112         /* Fall back if no extended metadata register support. */
3113         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3114                 return mlx5_flow_validate_action_mark(action, action_flags,
3115                                                       attr, error);
3116         /* Extensive metadata mode requires registers. */
3117         if (!mlx5_flow_ext_mreg_supported(dev))
3118                 return rte_flow_error_set(error, ENOTSUP,
3119                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3120                                           "no metadata registers "
3121                                           "to support mark action");
3122         if (!priv->sh->dv_mark_mask)
3123                 return rte_flow_error_set(error, ENOTSUP,
3124                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3125                                           "extended metadata register"
3126                                           " isn't available");
3127         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3128         if (ret < 0)
3129                 return ret;
3130         MLX5_ASSERT(ret > 0);
3131         if (!mark)
3132                 return rte_flow_error_set(error, EINVAL,
3133                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3134                                           "configuration cannot be null");
3135         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3136                 return rte_flow_error_set(error, EINVAL,
3137                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3138                                           &mark->id,
3139                                           "mark id exceeds the limit");
3140         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3141                 return rte_flow_error_set(error, EINVAL,
3142                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3143                                           "can't flag and mark in same flow");
3144         if (action_flags & MLX5_FLOW_ACTION_MARK)
3145                 return rte_flow_error_set(error, EINVAL,
3146                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3147                                           "can't have 2 mark actions in same"
3148                                           " flow");
3149         return 0;
3150 }
3151
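/*
 * Illustrative usage sketch from the application side, not part of
 * the driver: a MARK action the validator above accepts; combining
 * it with FLAG in the same flow is rejected. The helper name is
 * hypothetical.
 */
static inline void
flow_dv_example_mark_action(struct rte_flow_action *action,
                            struct rte_flow_action_mark *mark)
{
        mark->id = 0xcafe; /* must stay below the dv_mark_mask limit */
        action->type = RTE_FLOW_ACTION_TYPE_MARK;
        action->conf = mark;
}
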
3152 /**
3153  * Validate SET_META action.
3154  *
3155  * @param[in] dev
3156  *   Pointer to the rte_eth_dev structure.
3157  * @param[in] action
3158  *   Pointer to the action structure.
3159  * @param[in] action_flags
3160  *   Holds the actions detected until now.
3161  * @param[in] attr
3162  *   Pointer to flow attributes
3163  * @param[out] error
3164  *   Pointer to error structure.
3165  *
3166  * @return
3167  *   0 on success, a negative errno value otherwise and rte_errno is set.
3168  */
3169 static int
3170 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3171                                  const struct rte_flow_action *action,
3172                                  uint64_t action_flags __rte_unused,
3173                                  const struct rte_flow_attr *attr,
3174                                  struct rte_flow_error *error)
3175 {
3176         struct mlx5_priv *priv = dev->data->dev_private;
3177         struct mlx5_dev_config *config = &priv->config;
3178         const struct rte_flow_action_set_meta *conf;
3179         uint32_t nic_mask = UINT32_MAX;
3180         int reg;
3181
3182         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
3183             !mlx5_flow_ext_mreg_supported(dev))
3184                 return rte_flow_error_set(error, ENOTSUP,
3185                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3186                                           "extended metadata register"
3187                                           " isn't supported");
3188         reg = flow_dv_get_metadata_reg(dev, attr, error);
3189         if (reg < 0)
3190                 return reg;
3191         if (reg == REG_NON)
3192                 return rte_flow_error_set(error, ENOTSUP,
3193                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3194                                           "unavailable extended metadata register");
3195         if (reg != REG_A && reg != REG_B)
3196                 nic_mask = priv->sh->dv_meta_mask;
3200         if (!(action->conf))
3201                 return rte_flow_error_set(error, EINVAL,
3202                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3203                                           "configuration cannot be null");
3204         conf = (const struct rte_flow_action_set_meta *)action->conf;
3205         if (!conf->mask)
3206                 return rte_flow_error_set(error, EINVAL,
3207                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3208                                           "zero mask doesn't have any effect");
3209         if (conf->mask & ~nic_mask)
3210                 return rte_flow_error_set(error, EINVAL,
3211                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3212                                           "metadata must be within reg C0");
3213         return 0;
3214 }
3215
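/*
 * Illustrative sketch from the application side, not part of the
 * driver: a SET_META configuration the validator above accepts - a
 * non-zero mask confined to the bits the port exposes (reg C0 when
 * registers A/B are unavailable). The helper name is hypothetical.
 */
static inline void
flow_dv_example_set_meta(struct rte_flow_action_set_meta *conf)
{
        conf->data = 0x1234;
        conf->mask = 0xffff; /* non-zero, assumed within the NIC mask */
}
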
3216 /**
3217  * Validate SET_TAG action.
3218  *
3219  * @param[in] dev
3220  *   Pointer to the rte_eth_dev structure.
3221  * @param[in] action
3222  *   Pointer to the action structure.
3223  * @param[in] action_flags
3224  *   Holds the actions detected until now.
3225  * @param[in] attr
3226  *   Pointer to flow attributes
3227  * @param[out] error
3228  *   Pointer to error structure.
3229  *
3230  * @return
3231  *   0 on success, a negative errno value otherwise and rte_errno is set.
3232  */
3233 static int
3234 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3235                                 const struct rte_flow_action *action,
3236                                 uint64_t action_flags,
3237                                 const struct rte_flow_attr *attr,
3238                                 struct rte_flow_error *error)
3239 {
3240         const struct rte_flow_action_set_tag *conf;
3241         const uint64_t terminal_action_flags =
3242                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3243                 MLX5_FLOW_ACTION_RSS;
3244         int ret;
3245
3246         if (!mlx5_flow_ext_mreg_supported(dev))
3247                 return rte_flow_error_set(error, ENOTSUP,
3248                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3249                                           "extensive metadata register"
3250                                           " isn't supported");
3251         if (!(action->conf))
3252                 return rte_flow_error_set(error, EINVAL,
3253                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3254                                           "configuration cannot be null");
3255         conf = (const struct rte_flow_action_set_tag *)action->conf;
3256         if (!conf->mask)
3257                 return rte_flow_error_set(error, EINVAL,
3258                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3259                                           "zero mask doesn't have any effect");
3260         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3261         if (ret < 0)
3262                 return ret;
3263         if (!attr->transfer && attr->ingress &&
3264             (action_flags & terminal_action_flags))
3265                 return rte_flow_error_set(error, EINVAL,
3266                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3267                                           "set_tag has no effect"
3268                                           " with terminal actions");
3269         return 0;
3270 }
3271
3272 /**
3273  * Validate count action.
3274  *
3275  * @param[in] dev
3276  *   Pointer to rte_eth_dev structure.
3277  * @param[in] shared
3278  *   Indicator if action is shared.
3279  * @param[in] action_flags
3280  *   Holds the actions detected until now.
3281  * @param[out] error
3282  *   Pointer to error structure.
3283  *
3284  * @return
3285  *   0 on success, a negative errno value otherwise and rte_errno is set.
3286  */
3287 static int
3288 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3289                               uint64_t action_flags,
3290                               struct rte_flow_error *error)
3291 {
3292         struct mlx5_priv *priv = dev->data->dev_private;
3293
3294         if (!priv->sh->devx)
3295                 goto notsup_err;
3296         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3297                 return rte_flow_error_set(error, EINVAL,
3298                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3299                                           "duplicate count actions set");
3300         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3301             !priv->sh->flow_hit_aso_en)
3302                 return rte_flow_error_set(error, EINVAL,
3303                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3304                                           "old age and shared count combination is not supported");
3305 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3306         return 0;
3307 #endif
3308 notsup_err:
3309         return rte_flow_error_set
3310                       (error, ENOTSUP,
3311                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3312                        NULL,
3313                        "count action not supported");
3314 }
3315
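/*
 * Illustrative usage sketch from the application side, not part of
 * the driver: reading back a COUNT action attached to a flow with
 * rte_flow_query(). The helper name is hypothetical and the flow
 * handle is assumed to carry a COUNT action.
 */
static inline int
flow_dv_example_query_count(uint16_t port_id, struct rte_flow *flow,
                            uint64_t *hits, struct rte_flow_error *error)
{
        static const struct rte_flow_action count_action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_query_count counters = { .reset = 0 };
        int ret;

        ret = rte_flow_query(port_id, flow, &count_action, &counters, error);
        if (ret == 0)
                *hits = counters.hits_set ? counters.hits : 0;
        return ret;
}
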
3316 /**
3317  * Validate the L2 encap action.
3318  *
3319  * @param[in] dev
3320  *   Pointer to the rte_eth_dev structure.
3321  * @param[in] action_flags
3322  *   Holds the actions detected until now.
3323  * @param[in] action
3324  *   Pointer to the action structure.
3325  * @param[in] attr
3326  *   Pointer to flow attributes.
3327  * @param[out] error
3328  *   Pointer to error structure.
3329  *
3330  * @return
3331  *   0 on success, a negative errno value otherwise and rte_errno is set.
3332  */
3333 static int
3334 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3335                                  uint64_t action_flags,
3336                                  const struct rte_flow_action *action,
3337                                  const struct rte_flow_attr *attr,
3338                                  struct rte_flow_error *error)
3339 {
3340         const struct mlx5_priv *priv = dev->data->dev_private;
3341
3342         if (!(action->conf))
3343                 return rte_flow_error_set(error, EINVAL,
3344                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3345                                           "configuration cannot be null");
3346         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3347                 return rte_flow_error_set(error, EINVAL,
3348                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3349                                           "can only have a single encap action "
3350                                           "in a flow");
3351         if (!attr->transfer && priv->representor)
3352                 return rte_flow_error_set(error, ENOTSUP,
3353                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3354                                           "encap action for VF representor "
3355                                           "not supported on NIC table");
3356         return 0;
3357 }
3358
3359 /**
3360  * Validate a decap action.
3361  *
3362  * @param[in] dev
3363  *   Pointer to the rte_eth_dev structure.
3364  * @param[in] action_flags
3365  *   Holds the actions detected until now.
3366  * @param[in] action
3367  *   Pointer to the action structure.
3368  * @param[in] item_flags
3369  *   Holds the items detected.
3370  * @param[in] attr
3371  *   Pointer to flow attributes
3372  * @param[out] error
3373  *   Pointer to error structure.
3374  *
3375  * @return
3376  *   0 on success, a negative errno value otherwise and rte_errno is set.
3377  */
3378 static int
3379 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3380                               uint64_t action_flags,
3381                               const struct rte_flow_action *action,
3382                               const uint64_t item_flags,
3383                               const struct rte_flow_attr *attr,
3384                               struct rte_flow_error *error)
3385 {
3386         const struct mlx5_priv *priv = dev->data->dev_private;
3387
3388         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3389             !priv->config.decap_en)
3390                 return rte_flow_error_set(error, ENOTSUP,
3391                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3392                                           "decap is not enabled");
3393         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3394                 return rte_flow_error_set(error, ENOTSUP,
3395                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3396                                           action_flags &
3397                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3398                                           "have a single decap action" : "decap "
3399                                           "after encap is not supported");
3400         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3401                 return rte_flow_error_set(error, EINVAL,
3402                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3403                                           "can't have decap action after"
3404                                           " modify action");
3405         if (attr->egress)
3406                 return rte_flow_error_set(error, ENOTSUP,
3407                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3408                                           NULL,
3409                                           "decap action not supported for "
3410                                           "egress");
3411         if (!attr->transfer && priv->representor)
3412                 return rte_flow_error_set(error, ENOTSUP,
3413                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3414                                           "decap action for VF representor "
3415                                           "not supported on NIC table");
3416         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3417             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3418                 return rte_flow_error_set(error, ENOTSUP,
3419                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3420                                 "VXLAN item should be present for VXLAN decap");
3421         return 0;
3422 }
3423
3424 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3425
3426 /**
3427  * Validate the raw encap and decap actions.
3428  *
3429  * @param[in] dev
3430  *   Pointer to the rte_eth_dev structure.
3431  * @param[in] decap
3432  *   Pointer to the decap action.
3433  * @param[in] encap
3434  *   Pointer to the encap action.
3435  * @param[in] attr
3436  *   Pointer to flow attributes
3437  * @param[in, out] action_flags
3438  *   Holds the actions detected until now.
3439  * @param[out] actions_n
3440  *   Pointer to the number of actions counter.
3441  * @param[in] action
3442  *   Pointer to the action structure.
3443  * @param[in] item_flags
3444  *   Holds the items detected.
3445  * @param[out] error
3446  *   Pointer to error structure.
3447  *
3448  * @return
3449  *   0 on success, a negative errno value otherwise and rte_errno is set.
3450  */
3451 static int
3452 flow_dv_validate_action_raw_encap_decap
3453         (struct rte_eth_dev *dev,
3454          const struct rte_flow_action_raw_decap *decap,
3455          const struct rte_flow_action_raw_encap *encap,
3456          const struct rte_flow_attr *attr, uint64_t *action_flags,
3457          int *actions_n, const struct rte_flow_action *action,
3458          uint64_t item_flags, struct rte_flow_error *error)
3459 {
3460         const struct mlx5_priv *priv = dev->data->dev_private;
3461         int ret;
3462
3463         if (encap && (!encap->size || !encap->data))
3464                 return rte_flow_error_set(error, EINVAL,
3465                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3466                                           "raw encap data cannot be empty");
3467         if (decap && encap) {
3468                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3469                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3470                         /* L3 encap. */
3471                         decap = NULL;
3472                 else if (encap->size <=
3473                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3474                            decap->size >
3475                            MLX5_ENCAPSULATION_DECISION_SIZE)
3476                         /* L3 decap. */
3477                         encap = NULL;
3478                 else if (encap->size >
3479                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3480                            decap->size >
3481                            MLX5_ENCAPSULATION_DECISION_SIZE)
3482                         /* 2 L2 actions: encap and decap. */
3483                         ;
3484                 else
3485                         return rte_flow_error_set(error,
3486                                 ENOTSUP,
3487                                 RTE_FLOW_ERROR_TYPE_ACTION,
3488                                 NULL, "unsupported combination: "
3489                                 "both raw decap and raw encap "
3490                                 "sizes are too small");
3491         }
3492         if (decap) {
3493                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3494                                                     item_flags, attr, error);
3495                 if (ret < 0)
3496                         return ret;
3497                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3498                 ++(*actions_n);
3499         }
3500         if (encap) {
3501                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3502                         return rte_flow_error_set(error, ENOTSUP,
3503                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3504                                                   NULL,
3505                                                   "small raw encap size");
3506                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3507                         return rte_flow_error_set(error, EINVAL,
3508                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3509                                                   NULL,
3510                                                   "more than one encap action");
3511                 if (!attr->transfer && priv->representor)
3512                         return rte_flow_error_set
3513                                         (error, ENOTSUP,
3514                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3515                                          "encap action for VF representor "
3516                                          "not supported on NIC table");
3517                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3518                 ++(*actions_n);
3519         }
3520         return 0;
3521 }
3522
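/*
 * Illustrative sketch, not referenced by the driver: the size-based
 * decision applied above. A buffer of at most
 * MLX5_ENCAPSULATION_DECISION_SIZE bytes can only hold L2 headers,
 * so a small decap paired with a large encap is treated as an L3
 * encap (and the mirror case as an L3 decap). The helper name is
 * hypothetical.
 */
static inline bool
flow_dv_example_is_l3_encap(const struct rte_flow_action_raw_decap *decap,
                            const struct rte_flow_action_raw_encap *encap)
{
        return decap != NULL && encap != NULL &&
               decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
               encap->size > MLX5_ENCAPSULATION_DECISION_SIZE;
}
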
3523 /**
3524  * Validate the ASO CT action.
3525  *
3526  * @param[in] dev
3527  *   Pointer to the rte_eth_dev structure.
3528  * @param[in] action_flags
3529  *   Holds the actions detected until now.
3530  * @param[in] item_flags
3531  *   The items found in this flow rule.
3532  * @param[in] attr
3533  *   Pointer to flow attributes.
3534  * @param[out] error
3535  *   Pointer to error structure.
3536  *
3537  * @return
3538  *   0 on success, a negative errno value otherwise and rte_errno is set.
3539  */
3540 static int
3541 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3542                                uint64_t action_flags,
3543                                uint64_t item_flags,
3544                                const struct rte_flow_attr *attr,
3545                                struct rte_flow_error *error)
3546 {
3547         RTE_SET_USED(dev);
3548
3549         if (attr->group == 0 && !attr->transfer)
3550                 return rte_flow_error_set(error, ENOTSUP,
3551                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3552                                           NULL,
3553                                           "Only non-root tables are supported");
3554         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3555                 return rte_flow_error_set(error, ENOTSUP,
3556                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3557                                           "CT cannot follow a fate action");
3558         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3559             (action_flags & MLX5_FLOW_ACTION_AGE))
3560                 return rte_flow_error_set(error, EINVAL,
3561                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3562                                           "Only one ASO action is supported");
3563         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3564                 return rte_flow_error_set(error, EINVAL,
3565                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3566                                           "Encap cannot exist before CT");
3567         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3568                 return rte_flow_error_set(error, EINVAL,
3569                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3570                                           "Not an outer TCP packet");
3571         return 0;
3572 }
3573
3574 int
3575 flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
3576                              struct mlx5_list_entry *entry, void *cb_ctx)
3577 {
3578         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3579         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3580         struct mlx5_flow_dv_encap_decap_resource *resource;
3581
3582         resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
3583                                 entry);
3584         if (resource->reformat_type == ctx_resource->reformat_type &&
3585             resource->ft_type == ctx_resource->ft_type &&
3586             resource->flags == ctx_resource->flags &&
3587             resource->size == ctx_resource->size &&
3588             !memcmp((const void *)resource->buf,
3589                     (const void *)ctx_resource->buf,
3590                     resource->size))
3591                 return 0;
3592         return -1;
3593 }
3594
3595 struct mlx5_list_entry *
3596 flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
3597 {
3598         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3599         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3600         struct mlx5dv_dr_domain *domain;
3601         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3602         struct mlx5_flow_dv_encap_decap_resource *resource;
3603         uint32_t idx;
3604         int ret;
3605
3606         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3607                 domain = sh->fdb_domain;
3608         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3609                 domain = sh->rx_domain;
3610         else
3611                 domain = sh->tx_domain;
3612         /* Register new encap/decap resource. */
3613         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
3614         if (!resource) {
3615                 rte_flow_error_set(ctx->error, ENOMEM,
3616                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3617                                    "cannot allocate resource memory");
3618                 return NULL;
3619         }
3620         *resource = *ctx_resource;
3621         resource->idx = idx;
3622         ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->cdev->ctx,
3623                                                               domain, resource,
3624                                                              &resource->action);
3625         if (ret) {
3626                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3627                 rte_flow_error_set(ctx->error, ENOMEM,
3628                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3629                                    NULL, "cannot create action");
3630                 return NULL;
3631         }
3632
3633         return &resource->entry;
3634 }
3635
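/**
 * Clone callback for the encap/decap resource hash list.
 *
 * Allocates a new indexed-pool entry and copies the existing entry into it.
 *
 * @return
 *   Pointer to the cloned list entry on success, NULL otherwise and
 *   rte_errno is set.
 */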
3636 struct mlx5_list_entry *
3637 flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
3638                              void *cb_ctx)
3639 {
3640         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3641         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3642         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3643         uint32_t idx;
3644
3645         cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3646                                            &idx);
3647         if (!cache_resource) {
3648                 rte_flow_error_set(ctx->error, ENOMEM,
3649                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3650                                    "cannot allocate resource memory");
3651                 return NULL;
3652         }
3653         memcpy(cache_resource, oentry, sizeof(*cache_resource));
3654         cache_resource->idx = idx;
3655         return &cache_resource->entry;
3656 }
3657
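/**
 * Release callback for a cloned encap/decap resource entry.
 */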
3658 void
3659 flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3660 {
3661         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3662         struct mlx5_flow_dv_encap_decap_resource *res =
3663                                        container_of(entry, typeof(*res), entry);
3664
3665         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
3666 }
3667
3668 /**
3669  * Find existing encap/decap resource or create and register a new one.
3670  *
3671  * @param[in, out] dev
3672  *   Pointer to rte_eth_dev structure.
3673  * @param[in, out] resource
3674  *   Pointer to encap/decap resource.
3675  * @param[in, out] dev_flow
3676  *   Pointer to the dev_flow.
3677  * @param[out] error
3678  *   Pointer to error structure.
3679  *
3680  * @return
3681  *   0 on success, a negative errno value otherwise and rte_errno is set.
3682  */
3683 static int
3684 flow_dv_encap_decap_resource_register
3685                         (struct rte_eth_dev *dev,
3686                          struct mlx5_flow_dv_encap_decap_resource *resource,
3687                          struct mlx5_flow *dev_flow,
3688                          struct rte_flow_error *error)
3689 {
3690         struct mlx5_priv *priv = dev->data->dev_private;
3691         struct mlx5_dev_ctx_shared *sh = priv->sh;
3692         struct mlx5_list_entry *entry;
3693         union {
3694                 struct {
3695                         uint32_t ft_type:8;
3696                         uint32_t refmt_type:8;
3697                          /*
3698                           * Header reformat actions can be shared between
3699                           * non-root tables. One bit indicates whether the
3700                           * table is non-root.
3701                           */
3702                         uint32_t is_root:1;
3703                         uint32_t reserve:15;
3704                 };
3705                 uint32_t v32;
3706         } encap_decap_key = {
3707                 {
3708                         .ft_type = resource->ft_type,
3709                         .refmt_type = resource->reformat_type,
3710                         .is_root = !!dev_flow->dv.group,
3711                         .reserve = 0,
3712                 }
3713         };
3714         struct mlx5_flow_cb_ctx ctx = {
3715                 .error = error,
3716                 .data = resource,
3717         };
3718         struct mlx5_hlist *encaps_decaps;
3719         uint64_t key64;
3720
3721         encaps_decaps = flow_dv_hlist_prepare(sh, &sh->encaps_decaps,
3722                                 "encaps_decaps",
3723                                 MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
3724                                 true, true, sh,
3725                                 flow_dv_encap_decap_create_cb,
3726                                 flow_dv_encap_decap_match_cb,
3727                                 flow_dv_encap_decap_remove_cb,
3728                                 flow_dv_encap_decap_clone_cb,
3729                                 flow_dv_encap_decap_clone_free_cb);
3730         if (unlikely(!encaps_decaps))
3731                 return -rte_errno;
3732         resource->flags = dev_flow->dv.group ? 0 : 1;
3733         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3734                                 sizeof(encap_decap_key.v32), 0);
3735         if (resource->reformat_type !=
3736             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3737             resource->size)
3738                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3739         entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
3740         if (!entry)
3741                 return -rte_errno;
3742         resource = container_of(entry, typeof(*resource), entry);
3743         dev_flow->dv.encap_decap = resource;
3744         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3745         return 0;
3746 }
3747
3748 /**
3749  * Find existing table jump resource or create and register a new one.
3750  *
3751  * @param[in, out] dev
3752  *   Pointer to rte_eth_dev structure.
3753  * @param[in, out] tbl
3754  *   Pointer to flow table resource.
3755  * @param[in, out] dev_flow
3756  *   Pointer to the dev_flow.
3757  * @param[out] error
3758  *   Pointer to error structure.
3759  *
3760  * @return
3761  *   0 on success, a negative errno value otherwise and rte_errno is set.
3762  */
3763 static int
3764 flow_dv_jump_tbl_resource_register
3765                         (struct rte_eth_dev *dev __rte_unused,
3766                          struct mlx5_flow_tbl_resource *tbl,
3767                          struct mlx5_flow *dev_flow,
3768                          struct rte_flow_error *error __rte_unused)
3769 {
3770         struct mlx5_flow_tbl_data_entry *tbl_data =
3771                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3772
3773         MLX5_ASSERT(tbl);
3774         MLX5_ASSERT(tbl_data->jump.action);
3775         dev_flow->handle->rix_jump = tbl_data->idx;
3776         dev_flow->dv.jump = &tbl_data->jump;
3777         return 0;
3778 }
3779
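/**
 * Match callback for the port ID action list.
 *
 * @return
 *   0 if the port IDs are equal, nonzero otherwise.
 */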
3780 int
3781 flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
3782                          struct mlx5_list_entry *entry, void *cb_ctx)
3783 {
3784         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3785         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3786         struct mlx5_flow_dv_port_id_action_resource *res =
3787                                        container_of(entry, typeof(*res), entry);
3788
3789         return ref->port_id != res->port_id;
3790 }
3791
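/**
 * Create callback for the port ID action list.
 *
 * Allocates a new resource from the indexed pool and creates the
 * destination port action on the FDB domain.
 *
 * @return
 *   Pointer to the new list entry on success, NULL otherwise and
 *   rte_errno is set.
 */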
3792 struct mlx5_list_entry *
3793 flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
3794 {
3795         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3796         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3797         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3798         struct mlx5_flow_dv_port_id_action_resource *resource;
3799         uint32_t idx;
3800         int ret;
3801
3802         /* Register new port id action resource. */
3803         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3804         if (!resource) {
3805                 rte_flow_error_set(ctx->error, ENOMEM,
3806                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3807                                    "cannot allocate port_id action memory");
3808                 return NULL;
3809         }
3810         *resource = *ref;
3811         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3812                                                         ref->port_id,
3813                                                         &resource->action);
3814         if (ret) {
3815                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3816                 rte_flow_error_set(ctx->error, ENOMEM,
3817                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3818                                    "cannot create action");
3819                 return NULL;
3820         }
3821         resource->idx = idx;
3822         return &resource->entry;
3823 }
3824
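/**
 * Clone callback for the port ID action list.
 *
 * @return
 *   Pointer to the cloned list entry on success, NULL otherwise and
 *   rte_errno is set.
 */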
3825 struct mlx5_list_entry *
3826 flow_dv_port_id_clone_cb(void *tool_ctx,
3827                          struct mlx5_list_entry *entry __rte_unused,
3828                          void *cb_ctx)
3829 {
3830         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3831         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3832         struct mlx5_flow_dv_port_id_action_resource *resource;
3833         uint32_t idx;
3834
3835         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3836         if (!resource) {
3837                 rte_flow_error_set(ctx->error, ENOMEM,
3838                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3839                                    "cannot allocate port_id action memory");
3840                 return NULL;
3841         }
3842         memcpy(resource, entry, sizeof(*resource));
3843         resource->idx = idx;
3844         return &resource->entry;
3845 }
3846
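/**
 * Release callback for a cloned port ID action entry.
 */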
3847 void
3848 flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3849 {
3850         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3851         struct mlx5_flow_dv_port_id_action_resource *resource =
3852                                   container_of(entry, typeof(*resource), entry);
3853
3854         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3855 }
3856
3857 /**
3858  * Find existing table port ID resource or create and register a new one.
3859  *
3860  * @param[in, out] dev
3861  *   Pointer to rte_eth_dev structure.
3862  * @param[in, out] ref
3863  *   Pointer to port ID action resource reference.
3864  * @param[in, out] dev_flow
3865  *   Pointer to the dev_flow.
3866  * @param[out] error
3867  *   Pointer to error structure.
3868  *
3869  * @return
3870  *   0 on success, a negative errno value otherwise and rte_errno is set.
3871  */
3872 static int
3873 flow_dv_port_id_action_resource_register
3874                         (struct rte_eth_dev *dev,
3875                          struct mlx5_flow_dv_port_id_action_resource *ref,
3876                          struct mlx5_flow *dev_flow,
3877                          struct rte_flow_error *error)
3878 {
3879         struct mlx5_priv *priv = dev->data->dev_private;
3880         struct mlx5_list_entry *entry;
3881         struct mlx5_flow_dv_port_id_action_resource *resource;
3882         struct mlx5_flow_cb_ctx ctx = {
3883                 .error = error,
3884                 .data = ref,
3885         };
3886
3887         entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
3888         if (!entry)
3889                 return -rte_errno;
3890         resource = container_of(entry, typeof(*resource), entry);
3891         dev_flow->dv.port_id_action = resource;
3892         dev_flow->handle->rix_port_id_action = resource->idx;
3893         return 0;
3894 }
3895
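/**
 * Match callback for the push VLAN action list.
 *
 * @return
 *   0 if both the VLAN tag and the flow table type are equal,
 *   nonzero otherwise.
 */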
3896 int
3897 flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
3898                            struct mlx5_list_entry *entry, void *cb_ctx)
3899 {
3900         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3901         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3902         struct mlx5_flow_dv_push_vlan_action_resource *res =
3903                                        container_of(entry, typeof(*res), entry);
3904
3905         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3906 }
3907
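/**
 * Create callback for the push VLAN action list.
 *
 * Allocates a new resource from the indexed pool, selects the DR domain
 * from the flow table type and creates the push VLAN action.
 *
 * @return
 *   Pointer to the new list entry on success, NULL otherwise and
 *   rte_errno is set.
 */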
3908 struct mlx5_list_entry *
3909 flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
3910 {
3911         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3912         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3913         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3914         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3915         struct mlx5dv_dr_domain *domain;
3916         uint32_t idx;
3917         int ret;
3918
3919         /* Register new push VLAN action resource. */
3920         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3921         if (!resource) {
3922                 rte_flow_error_set(ctx->error, ENOMEM,
3923                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3924                                    "cannot allocate push_vlan action memory");
3925                 return NULL;
3926         }
3927         *resource = *ref;
3928         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3929                 domain = sh->fdb_domain;
3930         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3931                 domain = sh->rx_domain;
3932         else
3933                 domain = sh->tx_domain;
3934         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3935                                                         &resource->action);
3936         if (ret) {
3937                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3938                 rte_flow_error_set(ctx->error, ENOMEM,
3939                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3940                                    "cannot create push vlan action");
3941                 return NULL;
3942         }
3943         resource->idx = idx;
3944         return &resource->entry;
3945 }
3946
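/**
 * Clone callback for the push VLAN action list.
 *
 * @return
 *   Pointer to the cloned list entry on success, NULL otherwise and
 *   rte_errno is set.
 */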
3947 struct mlx5_list_entry *
3948 flow_dv_push_vlan_clone_cb(void *tool_ctx,
3949                            struct mlx5_list_entry *entry __rte_unused,
3950                            void *cb_ctx)
3951 {
3952         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3953         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3954         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3955         uint32_t idx;
3956
3957         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3958         if (!resource) {
3959                 rte_flow_error_set(ctx->error, ENOMEM,
3960                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3961                                    "cannot allocate push_vlan action memory");
3962                 return NULL;
3963         }
3964         memcpy(resource, entry, sizeof(*resource));
3965         resource->idx = idx;
3966         return &resource->entry;
3967 }
3968
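/**
 * Release callback for a cloned push VLAN action entry.
 */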
3969 void
3970 flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3971 {
3972         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3973         struct mlx5_flow_dv_push_vlan_action_resource *resource =
3974                                   container_of(entry, typeof(*resource), entry);
3975
3976         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
3977 }
3978
3979 /**
3980  * Find existing push VLAN resource or create and register a new one.
3981  *
3982  * @param[in, out] dev
3983  *   Pointer to rte_eth_dev structure.
3984  * @param[in, out] ref
3985  *   Pointer to push VLAN action resource reference.
3986  * @param[in, out] dev_flow
3987  *   Pointer to the dev_flow.
3988  * @param[out] error
3989  *   Pointer to error structure.
3990  *
3991  * @return
3992  *   0 on success, a negative errno value otherwise and rte_errno is set.
3993  */
3994 static int
3995 flow_dv_push_vlan_action_resource_register
3996                        (struct rte_eth_dev *dev,
3997                         struct mlx5_flow_dv_push_vlan_action_resource *ref,
3998                         struct mlx5_flow *dev_flow,
3999                         struct rte_flow_error *error)
4000 {
4001         struct mlx5_priv *priv = dev->data->dev_private;
4002         struct mlx5_flow_dv_push_vlan_action_resource *resource;
4003         struct mlx5_list_entry *entry;
4004         struct mlx5_flow_cb_ctx ctx = {
4005                 .error = error,
4006                 .data = ref,
4007         };
4008
4009         entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
4010         if (!entry)
4011                 return -rte_errno;
4012         resource = container_of(entry, typeof(*resource), entry);
4013
4014         dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4015         dev_flow->dv.push_vlan_res = resource;
4016         return 0;
4017 }
4018
4019 /**
4020  * Get the header size of a specific rte_flow_item_type.
4021  *
4022  * @param[in] item_type
4023  *   Tested rte_flow_item_type.
4024  *
4025  * @return
4026  *   Size of the item type header in bytes, 0 if void or irrelevant.
4027  */
4028 static size_t
4029 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4030 {
4031         size_t retval;
4032
4033         switch (item_type) {
4034         case RTE_FLOW_ITEM_TYPE_ETH:
4035                 retval = sizeof(struct rte_ether_hdr);
4036                 break;
4037         case RTE_FLOW_ITEM_TYPE_VLAN:
4038                 retval = sizeof(struct rte_vlan_hdr);
4039                 break;
4040         case RTE_FLOW_ITEM_TYPE_IPV4:
4041                 retval = sizeof(struct rte_ipv4_hdr);
4042                 break;
4043         case RTE_FLOW_ITEM_TYPE_IPV6:
4044                 retval = sizeof(struct rte_ipv6_hdr);
4045                 break;
4046         case RTE_FLOW_ITEM_TYPE_UDP:
4047                 retval = sizeof(struct rte_udp_hdr);
4048                 break;
4049         case RTE_FLOW_ITEM_TYPE_TCP:
4050                 retval = sizeof(struct rte_tcp_hdr);
4051                 break;
4052         case RTE_FLOW_ITEM_TYPE_VXLAN:
4053         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4054                 retval = sizeof(struct rte_vxlan_hdr);
4055                 break;
4056         case RTE_FLOW_ITEM_TYPE_GRE:
4057         case RTE_FLOW_ITEM_TYPE_NVGRE:
4058                 retval = sizeof(struct rte_gre_hdr);
4059                 break;
4060         case RTE_FLOW_ITEM_TYPE_MPLS:
4061                 retval = sizeof(struct rte_mpls_hdr);
4062                 break;
4063         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4064         default:
4065                 retval = 0;
4066                 break;
4067         }
4068         return retval;
4069 }
4070
4071 #define MLX5_ENCAP_IPV4_VERSION         0x40
4072 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
4073 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
4074 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
4075 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
4076 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
4077 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
4078
4079 /**
4080  * Convert the encap action data from a list of rte_flow_item objects to a raw buffer.
4081  *
4082  * @param[in] items
4083  *   Pointer to rte_flow_item objects list.
4084  * @param[out] buf
4085  *   Pointer to the output buffer.
4086  * @param[out] size
4087  *   Pointer to the output buffer size.
4088  * @param[out] error
4089  *   Pointer to the error structure.
4090  *
4091  * @return
4092  *   0 on success, a negative errno value otherwise and rte_errno is set.
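 *
 * For instance, a VXLAN encapsulation could be described by the item
 * list below (an illustrative sketch only, the spec headers are
 * application-defined):
 *
 * @code
 * struct rte_flow_item items[] = {
 *         { .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &eth },
 *         { .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &ipv4 },
 *         { .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &udp },
 *         { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *         { .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * @endcode
 *
 * Zeroed protocol fields (EtherType, IPv4 next protocol and TTL, UDP
 * destination port, VXLAN flags) are filled in with default values.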
4093  */
4094 static int
4095 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4096                            size_t *size, struct rte_flow_error *error)
4097 {
4098         struct rte_ether_hdr *eth = NULL;
4099         struct rte_vlan_hdr *vlan = NULL;
4100         struct rte_ipv4_hdr *ipv4 = NULL;
4101         struct rte_ipv6_hdr *ipv6 = NULL;
4102         struct rte_udp_hdr *udp = NULL;
4103         struct rte_vxlan_hdr *vxlan = NULL;
4104         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4105         struct rte_gre_hdr *gre = NULL;
4106         size_t len;
4107         size_t temp_size = 0;
4108
4109         if (!items)
4110                 return rte_flow_error_set(error, EINVAL,
4111                                           RTE_FLOW_ERROR_TYPE_ACTION,
4112                                           NULL, "invalid empty data");
4113         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4114                 len = flow_dv_get_item_hdr_len(items->type);
4115                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4116                         return rte_flow_error_set(error, EINVAL,
4117                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4118                                                   (void *)items->type,
4119                                                   "items total size is too big"
4120                                                   " for encap action");
4121                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4122                 switch (items->type) {
4123                 case RTE_FLOW_ITEM_TYPE_ETH:
4124                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4125                         break;
4126                 case RTE_FLOW_ITEM_TYPE_VLAN:
4127                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4128                         if (!eth)
4129                                 return rte_flow_error_set(error, EINVAL,
4130                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4131                                                 (void *)items->type,
4132                                                 "eth header not found");
4133                         if (!eth->ether_type)
4134                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4135                         break;
4136                 case RTE_FLOW_ITEM_TYPE_IPV4:
4137                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4138                         if (!vlan && !eth)
4139                                 return rte_flow_error_set(error, EINVAL,
4140                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4141                                                 (void *)items->type,
4142                                                 "neither eth nor vlan"
4143                                                 " header found");
4144                         if (vlan && !vlan->eth_proto)
4145                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4146                         else if (eth && !eth->ether_type)
4147                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4148                         if (!ipv4->version_ihl)
4149                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4150                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4151                         if (!ipv4->time_to_live)
4152                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4153                         break;
4154                 case RTE_FLOW_ITEM_TYPE_IPV6:
4155                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4156                         if (!vlan && !eth)
4157                                 return rte_flow_error_set(error, EINVAL,
4158                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4159                                                 (void *)items->type,
4160                                                 "neither eth nor vlan"
4161                                                 " header found");
4162                         if (vlan && !vlan->eth_proto)
4163                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4164                         else if (eth && !eth->ether_type)
4165                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4166                         if (!ipv6->vtc_flow)
4167                                 ipv6->vtc_flow =
4168                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4169                         if (!ipv6->hop_limits)
4170                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4171                         break;
4172                 case RTE_FLOW_ITEM_TYPE_UDP:
4173                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4174                         if (!ipv4 && !ipv6)
4175                                 return rte_flow_error_set(error, EINVAL,
4176                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4177                                                 (void *)items->type,
4178                                                 "ip header not found");
4179                         if (ipv4 && !ipv4->next_proto_id)
4180                                 ipv4->next_proto_id = IPPROTO_UDP;
4181                         else if (ipv6 && !ipv6->proto)
4182                                 ipv6->proto = IPPROTO_UDP;
4183                         break;
4184                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4185                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4186                         if (!udp)
4187                                 return rte_flow_error_set(error, EINVAL,
4188                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4189                                                 (void *)items->type,
4190                                                 "udp header not found");
4191                         if (!udp->dst_port)
4192                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4193                         if (!vxlan->vx_flags)
4194                                 vxlan->vx_flags =
4195                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4196                         break;
4197                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4198                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4199                         if (!udp)
4200                                 return rte_flow_error_set(error, EINVAL,
4201                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4202                                                 (void *)items->type,
4203                                                 "udp header not found");
4204                         if (!vxlan_gpe->proto)
4205                                 return rte_flow_error_set(error, EINVAL,
4206                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4207                                                 (void *)items->type,
4208                                                 "next protocol not found");
4209                         if (!udp->dst_port)
4210                                 udp->dst_port =
4211                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4212                         if (!vxlan_gpe->vx_flags)
4213                                 vxlan_gpe->vx_flags =
4214                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4215                         break;
4216                 case RTE_FLOW_ITEM_TYPE_GRE:
4217                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4218                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4219                         if (!gre->proto)
4220                                 return rte_flow_error_set(error, EINVAL,
4221                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4222                                                 (void *)items->type,
4223                                                 "next protocol not found");
4224                         if (!ipv4 && !ipv6)
4225                                 return rte_flow_error_set(error, EINVAL,
4226                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4227                                                 (void *)items->type,
4228                                                 "ip header not found");
4229                         if (ipv4 && !ipv4->next_proto_id)
4230                                 ipv4->next_proto_id = IPPROTO_GRE;
4231                         else if (ipv6 && !ipv6->proto)
4232                                 ipv6->proto = IPPROTO_GRE;
4233                         break;
4234                 case RTE_FLOW_ITEM_TYPE_VOID:
4235                         break;
4236                 default:
4237                         return rte_flow_error_set(error, EINVAL,
4238                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4239                                                   (void *)items->type,
4240                                                   "unsupported item type");
4241                         break;
4242                 }
4243                 temp_size += len;
4244         }
4245         *size = temp_size;
4246         return 0;
4247 }
4248
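/**
 * Zero the outer UDP checksum in raw L2 encapsulation data.
 *
 * Walks the Ethernet header and any stacked VLAN headers. IPv4 data is
 * left untouched since HW calculates the IPv4 checksum; for IPv6
 * followed by UDP the datagram checksum is cleared. Other L3 protocols
 * are rejected.
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation data, starting at the Ethernet header.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */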
4249 static int
4250 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4251 {
4252         struct rte_ether_hdr *eth = NULL;
4253         struct rte_vlan_hdr *vlan = NULL;
4254         struct rte_ipv6_hdr *ipv6 = NULL;
4255         struct rte_udp_hdr *udp = NULL;
4256         char *next_hdr;
4257         uint16_t proto;
4258
4259         eth = (struct rte_ether_hdr *)data;
4260         next_hdr = (char *)(eth + 1);
4261         proto = RTE_BE16(eth->ether_type);
4262
4263         /* VLAN skipping */
4264         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4265                 vlan = (struct rte_vlan_hdr *)next_hdr;
4266                 proto = RTE_BE16(vlan->eth_proto);
4267                 next_hdr += sizeof(struct rte_vlan_hdr);
4268         }
4269
4270         /* HW calculates the IPv4 checksum. No need to proceed. */
4271         if (proto == RTE_ETHER_TYPE_IPV4)
4272                 return 0;
4273
4274         /* Non IPv4/IPv6 header. Not supported. */
4275         if (proto != RTE_ETHER_TYPE_IPV6) {
4276                 return rte_flow_error_set(error, ENOTSUP,
4277                                           RTE_FLOW_ERROR_TYPE_ACTION,
4278                                           NULL, "Cannot offload non IPv4/IPv6");
4279         }
4280
4281         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4282
4283         /* ignore non UDP */
4284         if (ipv6->proto != IPPROTO_UDP)
4285                 return 0;
4286
4287         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4288         udp->dgram_cksum = 0;
4289
4290         return 0;
4291 }
4292
4293 /**
4294  * Convert L2 encap action to DV specification.
4295  *
4296  * @param[in] dev
4297  *   Pointer to rte_eth_dev structure.
4298  * @param[in] action
4299  *   Pointer to action structure.
4300  * @param[in, out] dev_flow
4301  *   Pointer to the mlx5_flow.
4302  * @param[in] transfer
4303  *   Mark if the flow is E-Switch flow.
4304  * @param[out] error
4305  *   Pointer to the error structure.
4306  *
4307  * @return
4308  *   0 on success, a negative errno value otherwise and rte_errno is set.
4309  */
4310 static int
4311 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4312                                const struct rte_flow_action *action,
4313                                struct mlx5_flow *dev_flow,
4314                                uint8_t transfer,
4315                                struct rte_flow_error *error)
4316 {
4317         const struct rte_flow_item *encap_data;
4318         const struct rte_flow_action_raw_encap *raw_encap_data;
4319         struct mlx5_flow_dv_encap_decap_resource res = {
4320                 .reformat_type =
4321                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4322                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4323                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4324         };
4325
4326         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4327                 raw_encap_data =
4328                         (const struct rte_flow_action_raw_encap *)action->conf;
4329                 res.size = raw_encap_data->size;
4330                 memcpy(res.buf, raw_encap_data->data, res.size);
4331         } else {
4332                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4333                         encap_data =
4334                                 ((const struct rte_flow_action_vxlan_encap *)
4335                                                 action->conf)->definition;
4336                 else
4337                         encap_data =
4338                                 ((const struct rte_flow_action_nvgre_encap *)
4339                                                 action->conf)->definition;
4340                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4341                                                &res.size, error))
4342                         return -rte_errno;
4343         }
4344         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4345                 return -rte_errno;
4346         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4347                 return rte_flow_error_set(error, EINVAL,
4348                                           RTE_FLOW_ERROR_TYPE_ACTION,
4349                                           NULL, "can't create L2 encap action");
4350         return 0;
4351 }
4352
4353 /**
4354  * Convert L2 decap action to DV specification.
4355  *
4356  * @param[in] dev
4357  *   Pointer to rte_eth_dev structure.
4358  * @param[in, out] dev_flow
4359  *   Pointer to the mlx5_flow.
4360  * @param[in] transfer
4361  *   Mark if the flow is E-Switch flow.
4362  * @param[out] error
4363  *   Pointer to the error structure.
4364  *
4365  * @return
4366  *   0 on success, a negative errno value otherwise and rte_errno is set.
4367  */
4368 static int
4369 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4370                                struct mlx5_flow *dev_flow,
4371                                uint8_t transfer,
4372                                struct rte_flow_error *error)
4373 {
4374         struct mlx5_flow_dv_encap_decap_resource res = {
4375                 .size = 0,
4376                 .reformat_type =
4377                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4378                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4379                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4380         };
4381
4382         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4383                 return rte_flow_error_set(error, EINVAL,
4384                                           RTE_FLOW_ERROR_TYPE_ACTION,
4385                                           NULL, "can't create L2 decap action");
4386         return 0;
4387 }
4388
4389 /**
4390  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4391  *
4392  * @param[in] dev
4393  *   Pointer to rte_eth_dev structure.
4394  * @param[in] action
4395  *   Pointer to action structure.
4396  * @param[in, out] dev_flow
4397  *   Pointer to the mlx5_flow.
4398  * @param[in] attr
4399  *   Pointer to the flow attributes.
4400  * @param[out] error
4401  *   Pointer to the error structure.
4402  *
4403  * @return
4404  *   0 on success, a negative errno value otherwise and rte_errno is set.
4405  */
4406 static int
4407 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4408                                 const struct rte_flow_action *action,
4409                                 struct mlx5_flow *dev_flow,
4410                                 const struct rte_flow_attr *attr,
4411                                 struct rte_flow_error *error)
4412 {
4413         const struct rte_flow_action_raw_encap *encap_data;
4414         struct mlx5_flow_dv_encap_decap_resource res;
4415
4416         memset(&res, 0, sizeof(res));
4417         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4418         res.size = encap_data->size;
4419         memcpy(res.buf, encap_data->data, res.size);
4420         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4421                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4422                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4423         if (attr->transfer)
4424                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4425         else
4426                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4427                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4428         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4429                 return rte_flow_error_set(error, EINVAL,
4430                                           RTE_FLOW_ERROR_TYPE_ACTION,
4431                                           NULL, "can't create encap action");
4432         return 0;
4433 }
4434
4435 /**
4436  * Create push VLAN action.
4437  *
4438  * @param[in] dev
4439  *   Pointer to rte_eth_dev structure.
4440  * @param[in] attr
4441  *   Pointer to the flow attributes.
4442  * @param[in] vlan
4443  *   Pointer to the VLAN to push to the Ethernet header.
4444  * @param[in, out] dev_flow
4445  *   Pointer to the mlx5_flow.
4446  * @param[out] error
4447  *   Pointer to the error structure.
4448  *
4449  * @return
4450  *   0 on success, a negative errno value otherwise and rte_errno is set.
4451  */
4452 static int
4453 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4454                                 const struct rte_flow_attr *attr,
4455                                 const struct rte_vlan_hdr *vlan,
4456                                 struct mlx5_flow *dev_flow,
4457                                 struct rte_flow_error *error)
4458 {
4459         struct mlx5_flow_dv_push_vlan_action_resource res;
4460
4461         memset(&res, 0, sizeof(res));
4462         res.vlan_tag =
4463                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4464                                  vlan->vlan_tci);
4465         if (attr->transfer)
4466                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4467         else
4468                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4469                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4470         return flow_dv_push_vlan_action_resource_register
4471                                             (dev, &res, dev_flow, error);
4472 }
4473
4474 /**
4475  * Validate the modify-header actions.
4476  *
4477  * @param[in] action_flags
4478  *   Holds the actions detected until now.
4479  * @param[in] action
4480  *   Pointer to the modify action.
4481  * @param[out] error
4482  *   Pointer to error structure.
4483  *
4484  * @return
4485  *   0 on success, a negative errno value otherwise and rte_errno is set.
4486  */
4487 static int
4488 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4489                                    const struct rte_flow_action *action,
4490                                    struct rte_flow_error *error)
4491 {
4492         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4493                 return rte_flow_error_set(error, EINVAL,
4494                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4495                                           NULL, "action configuration not set");
4496         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4497                 return rte_flow_error_set(error, EINVAL,
4498                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4499                                           "can't have encap action before"
4500                                           " modify action");
4501         return 0;
4502 }
4503
4504 /**
4505  * Validate the modify-header MAC address actions.
4506  *
4507  * @param[in] action_flags
4508  *   Holds the actions detected until now.
4509  * @param[in] action
4510  *   Pointer to the modify action.
4511  * @param[in] item_flags
4512  *   Holds the items detected.
4513  * @param[out] error
4514  *   Pointer to error structure.
4515  *
4516  * @return
4517  *   0 on success, a negative errno value otherwise and rte_errno is set.
4518  */
4519 static int
4520 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4521                                    const struct rte_flow_action *action,
4522                                    const uint64_t item_flags,
4523                                    struct rte_flow_error *error)
4524 {
4525         int ret = 0;
4526
4527         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4528         if (!ret) {
4529                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4530                         return rte_flow_error_set(error, EINVAL,
4531                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4532                                                   NULL,
4533                                                   "no L2 item in pattern");
4534         }
4535         return ret;
4536 }
4537
4538 /**
4539  * Validate the modify-header IPv4 address actions.
4540  *
4541  * @param[in] action_flags
4542  *   Holds the actions detected until now.
4543  * @param[in] action
4544  *   Pointer to the modify action.
4545  * @param[in] item_flags
4546  *   Holds the items detected.
4547  * @param[out] error
4548  *   Pointer to error structure.
4549  *
4550  * @return
4551  *   0 on success, a negative errno value otherwise and rte_errno is set.
4552  */
4553 static int
4554 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4555                                     const struct rte_flow_action *action,
4556                                     const uint64_t item_flags,
4557                                     struct rte_flow_error *error)
4558 {
4559         int ret = 0;
4560         uint64_t layer;
4561
4562         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4563         if (!ret) {
4564                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4565                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4566                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4567                 if (!(item_flags & layer))
4568                         return rte_flow_error_set(error, EINVAL,
4569                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4570                                                   NULL,
4571                                                   "no ipv4 item in pattern");
4572         }
4573         return ret;
4574 }
4575
4576 /**
4577  * Validate the modify-header IPv6 address actions.
4578  *
4579  * @param[in] action_flags
4580  *   Holds the actions detected until now.
4581  * @param[in] action
4582  *   Pointer to the modify action.
4583  * @param[in] item_flags
4584  *   Holds the items detected.
4585  * @param[out] error
4586  *   Pointer to error structure.
4587  *
4588  * @return
4589  *   0 on success, a negative errno value otherwise and rte_errno is set.
4590  */
4591 static int
4592 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4593                                     const struct rte_flow_action *action,
4594                                     const uint64_t item_flags,
4595                                     struct rte_flow_error *error)
4596 {
4597         int ret = 0;
4598         uint64_t layer;
4599
4600         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4601         if (!ret) {
4602                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4603                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4604                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4605                 if (!(item_flags & layer))
4606                         return rte_flow_error_set(error, EINVAL,
4607                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4608                                                   NULL,
4609                                                   "no ipv6 item in pattern");
4610         }
4611         return ret;
4612 }
4613
4614 /**
4615  * Validate the modify-header TP actions.
4616  *
4617  * @param[in] action_flags
4618  *   Holds the actions detected until now.
4619  * @param[in] action
4620  *   Pointer to the modify action.
4621  * @param[in] item_flags
4622  *   Holds the items detected.
4623  * @param[out] error
4624  *   Pointer to error structure.
4625  *
4626  * @return
4627  *   0 on success, a negative errno value otherwise and rte_errno is set.
4628  */
4629 static int
4630 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4631                                   const struct rte_flow_action *action,
4632                                   const uint64_t item_flags,
4633                                   struct rte_flow_error *error)
4634 {
4635         int ret = 0;
4636         uint64_t layer;
4637
4638         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4639         if (!ret) {
4640                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4641                                  MLX5_FLOW_LAYER_INNER_L4 :
4642                                  MLX5_FLOW_LAYER_OUTER_L4;
4643                 if (!(item_flags & layer))
4644                         return rte_flow_error_set(error, EINVAL,
4645                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4646                                                   NULL, "no transport layer "
4647                                                   "in pattern");
4648         }
4649         return ret;
4650 }
4651
4652 /**
4653  * Validate the modify-header actions of increment/decrement
4654  * TCP Sequence-number.
4655  *
4656  * @param[in] action_flags
4657  *   Holds the actions detected until now.
4658  * @param[in] action
4659  *   Pointer to the modify action.
4660  * @param[in] item_flags
4661  *   Holds the items detected.
4662  * @param[out] error
4663  *   Pointer to error structure.
4664  *
4665  * @return
4666  *   0 on success, a negative errno value otherwise and rte_errno is set.
4667  */
4668 static int
4669 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4670                                        const struct rte_flow_action *action,
4671                                        const uint64_t item_flags,
4672                                        struct rte_flow_error *error)
4673 {
4674         int ret = 0;
4675         uint64_t layer;
4676
4677         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4678         if (!ret) {
4679                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4680                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4681                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4682                 if (!(item_flags & layer))
4683                         return rte_flow_error_set(error, EINVAL,
4684                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4685                                                   NULL, "no TCP item in"
4686                                                   " pattern");
4687                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4688                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4689                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4690                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4691                         return rte_flow_error_set(error, EINVAL,
4692                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4693                                                   NULL,
4694                                                   "cannot decrease and increase"
4695                                                   " TCP sequence number"
4696                                                   " at the same time");
4697         }
4698         return ret;
4699 }
4700
4701 /**
4702  * Validate the modify-header actions of increment/decrement
4703  * TCP Acknowledgment number.
4704  *
4705  * @param[in] action_flags
4706  *   Holds the actions detected until now.
4707  * @param[in] action
4708  *   Pointer to the modify action.
4709  * @param[in] item_flags
4710  *   Holds the items detected.
4711  * @param[out] error
4712  *   Pointer to error structure.
4713  *
4714  * @return
4715  *   0 on success, a negative errno value otherwise and rte_errno is set.
4716  */
4717 static int
4718 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4719                                        const struct rte_flow_action *action,
4720                                        const uint64_t item_flags,
4721                                        struct rte_flow_error *error)
4722 {
4723         int ret = 0;
4724         uint64_t layer;
4725
4726         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4727         if (!ret) {
4728                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4729                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4730                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4731                 if (!(item_flags & layer))
4732                         return rte_flow_error_set(error, EINVAL,
4733                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4734                                                   NULL, "no TCP item in"
4735                                                   " pattern");
4736                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4737                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4738                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4739                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4740                         return rte_flow_error_set(error, EINVAL,
4741                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4742                                                   NULL,
4743                                                   "cannot decrease and increase"
4744                                                   " TCP acknowledgment number"
4745                                                   " at the same time");
4746         }
4747         return ret;
4748 }
4749
4750 /**
4751  * Validate the modify-header TTL actions.
4752  *
4753  * @param[in] action_flags
4754  *   Holds the actions detected until now.
4755  * @param[in] action
4756  *   Pointer to the modify action.
4757  * @param[in] item_flags
4758  *   Holds the items detected.
4759  * @param[out] error
4760  *   Pointer to error structure.
4761  *
4762  * @return
4763  *   0 on success, a negative errno value otherwise and rte_errno is set.
4764  */
4765 static int
4766 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4767                                    const struct rte_flow_action *action,
4768                                    const uint64_t item_flags,
4769                                    struct rte_flow_error *error)
4770 {
4771         int ret = 0;
4772         uint64_t layer;
4773
4774         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4775         if (!ret) {
4776                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4777                                  MLX5_FLOW_LAYER_INNER_L3 :
4778                                  MLX5_FLOW_LAYER_OUTER_L3;
4779                 if (!(item_flags & layer))
4780                         return rte_flow_error_set(error, EINVAL,
4781                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4782                                                   NULL,
4783                                                   "no IP protocol in pattern");
4784         }
4785         return ret;
4786 }
4787
4788 /**
4789  * Validate the generic modify field actions.
4790  * @param[in] dev
4791  *   Pointer to the rte_eth_dev structure.
4792  * @param[in] action_flags
4793  *   Holds the actions detected until now.
4794  * @param[in] action
4795  *   Pointer to the modify action.
4796  * @param[in] attr
4797  *   Pointer to the flow attributes.
4798  * @param[out] error
4799  *   Pointer to error structure.
4800  *
4801  * @return
4802  *   Number of header fields to modify (0 or more) on success,
4803  *   a negative errno value otherwise and rte_errno is set.
4804  */
4805 static int
4806 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4807                                    const uint64_t action_flags,
4808                                    const struct rte_flow_action *action,
4809                                    const struct rte_flow_attr *attr,
4810                                    struct rte_flow_error *error)
4811 {
4812         int ret = 0;
4813         struct mlx5_priv *priv = dev->data->dev_private;
4814         struct mlx5_dev_config *config = &priv->config;
4815         const struct rte_flow_action_modify_field *action_modify_field =
4816                 action->conf;
4817         uint32_t dst_width = mlx5_flow_item_field_width(dev,
4818                                 action_modify_field->dst.field,
4819                                 -1, attr, error);
4820         uint32_t src_width = mlx5_flow_item_field_width(dev,
4821                                 action_modify_field->src.field,
4822                                 dst_width, attr, error);
4823
4824         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4825         if (ret)
4826                 return ret;
4827
4828         if (action_modify_field->width == 0)
4829                 return rte_flow_error_set(error, EINVAL,
4830                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4831                                 "no bits are requested to be modified");
4832         else if (action_modify_field->width > dst_width ||
4833                  action_modify_field->width > src_width)
4834                 return rte_flow_error_set(error, EINVAL,
4835                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4836                                 "cannot modify more bits than"
4837                                 " the width of a field");
4838         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4839             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4840                 if ((action_modify_field->dst.offset +
4841                      action_modify_field->width > dst_width) ||
4842                     (action_modify_field->dst.offset % 32))
4843                         return rte_flow_error_set(error, EINVAL,
4844                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4845                                         "destination offset is too big"
4846                                         " or not aligned to 4 bytes");
4847                 if (action_modify_field->dst.level &&
4848                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4849                         return rte_flow_error_set(error, ENOTSUP,
4850                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4851                                         "inner header fields modification"
4852                                         " is not supported");
4853         }
4854         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4855             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4856                 if (!attr->transfer && !attr->group)
4857                         return rte_flow_error_set(error, ENOTSUP,
4858                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4859                                         "modify field action is not"
4860                                         " supported for group 0");
4861                 if ((action_modify_field->src.offset +
4862                      action_modify_field->width > src_width) ||
4863                     (action_modify_field->src.offset % 32))
4864                         return rte_flow_error_set(error, EINVAL,
4865                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4866                                         "source offset is too big"
4867                                         " or not aligned to 4 bytes");
4868                 if (action_modify_field->src.level &&
4869                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4870                         return rte_flow_error_set(error, ENOTSUP,
4871                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4872                                         "inner header fields modification"
4873                                         " is not supported");
4874         }
4875         if ((action_modify_field->dst.field ==
4876              action_modify_field->src.field) &&
4877             (action_modify_field->dst.level ==
4878              action_modify_field->src.level))
4879                 return rte_flow_error_set(error, EINVAL,
4880                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4881                                 "source and destination fields"
4882                                 " cannot be the same");
4883         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4884             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER ||
4885             action_modify_field->dst.field == RTE_FLOW_FIELD_MARK)
4886                 return rte_flow_error_set(error, EINVAL,
4887                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4888                                 "mark, immediate value or a pointer to it"
4889                                 " cannot be used as a destination");
4890         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4891             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4892                 return rte_flow_error_set(error, ENOTSUP,
4893                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
				"modification of an arbitrary"
				" place in a packet is not supported");
4896         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4897             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4898                 return rte_flow_error_set(error, ENOTSUP,
4899                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
				"modification of the 802.1Q Tag"
				" Identifier is not supported");
4902         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4903             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4904                 return rte_flow_error_set(error, ENOTSUP,
4905                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
				"modification of the VXLAN Network"
4907                                 " Identifier is not supported");
4908         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4909             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4910                 return rte_flow_error_set(error, ENOTSUP,
4911                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
				"modification of the GENEVE Network"
4913                                 " Identifier is not supported");
4914         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4915             action_modify_field->src.field == RTE_FLOW_FIELD_MARK)
4916                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4917                     !mlx5_flow_ext_mreg_supported(dev))
4918                         return rte_flow_error_set(error, ENOTSUP,
4919                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4920                                         "cannot modify mark in legacy mode"
4921                                         " or without extensive registers");
4922         if (action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4923             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4924                 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
4925                     !mlx5_flow_ext_mreg_supported(dev))
4926                         return rte_flow_error_set(error, ENOTSUP,
4927                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4928                                         "cannot modify meta without"
4929                                         " extensive registers support");
4930                 ret = flow_dv_get_metadata_reg(dev, attr, error);
4931                 if (ret < 0 || ret == REG_NON)
4932                         return rte_flow_error_set(error, ENOTSUP,
4933                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4934                                         "cannot modify meta without"
4935                                         " extensive registers available");
4936         }
4937         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4938                 return rte_flow_error_set(error, ENOTSUP,
4939                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4940                                 "add and sub operations"
4941                                 " are not supported");
4942         return (action_modify_field->width / 32) +
4943                !!(action_modify_field->width % 32);
4944 }
4945
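/*
 * Usage sketch (illustrative, not part of the driver): a MODIFY_FIELD
 * configuration that satisfies the checks above - SET operation, non-zero
 * width within the field widths, 32-bit aligned offsets, and distinct
 * source and destination. Assumes the rte_flow_action_modify_field layout
 * from <rte_flow.h> of this DPDK generation.
 */
static __rte_unused const struct rte_flow_action_modify_field
flow_dv_example_modify_field_conf = {
	.operation = RTE_FLOW_MODIFY_SET, /* ADD/SUB are rejected above */
	.dst = {
		.field = RTE_FLOW_FIELD_TAG,
		.level = 0,  /* TAG index; non-zero level is TAG-only */
		.offset = 0, /* must be 32-bit aligned */
	},
	.src = {
		.field = RTE_FLOW_FIELD_VALUE, /* immediate source */
		.value = {0x01},
	},
	.width = 32, /* validation returns one 32-bit command here */
};
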
/**
 * Validate jump action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload structure, or NULL when the rule does
 *   not use tunnel offload.
 * @param[in] action
 *   Pointer to the jump action.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in] external
 *   Whether the action belongs to a flow rule created by a request
 *   external to the PMD.
 * @param[out] error
 *   Pointer to error structure.
4959  *
4960  * @return
4961  *   0 on success, a negative errno value otherwise and rte_errno is set.
4962  */
4963 static int
4964 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4965                              const struct mlx5_flow_tunnel *tunnel,
4966                              const struct rte_flow_action *action,
4967                              uint64_t action_flags,
4968                              const struct rte_flow_attr *attributes,
4969                              bool external, struct rte_flow_error *error)
4970 {
4971         uint32_t target_group, table;
4972         int ret = 0;
4973         struct flow_grp_info grp_info = {
4974                 .external = !!external,
4975                 .transfer = !!attributes->transfer,
4976                 .fdb_def_rule = 1,
4977                 .std_tbl_fix = 0
4978         };
4979         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4980                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4981                 return rte_flow_error_set(error, EINVAL,
4982                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " the same flow");
4985         if (!action->conf)
4986                 return rte_flow_error_set(error, EINVAL,
4987                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4988                                           NULL, "action configuration not set");
4989         target_group =
4990                 ((const struct rte_flow_action_jump *)action->conf)->group;
4991         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4992                                        &grp_info, error);
4993         if (ret)
4994                 return ret;
4995         if (attributes->group == target_group &&
4996             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4997                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4998                 return rte_flow_error_set(error, EINVAL,
4999                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5000                                           "target group must be other than"
5001                                           " the current flow group");
5002         return 0;
5003 }
5004
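/*
 * Usage sketch: a JUMP configuration the validation above accepts when
 * attached to a rule in a different group (e.g. group 0), with no other
 * fate action in the same rule.
 */
static __rte_unused const struct rte_flow_action_jump
flow_dv_example_jump_conf = {
	.group = 1, /* must differ from the rule's own group */
};
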
/**
5006  * Validate action PORT_ID / REPRESENTED_PORT.
5007  *
5008  * @param[in] dev
5009  *   Pointer to rte_eth_dev structure.
5010  * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
5012  * @param[in] action
5013  *   PORT_ID / REPRESENTED_PORT action structure.
5014  * @param[in] attr
5015  *   Attributes of flow that includes this action.
5016  * @param[out] error
5017  *   Pointer to error structure.
5018  *
5019  * @return
5020  *   0 on success, a negative errno value otherwise and rte_errno is set.
5021  */
5022 static int
5023 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5024                                 uint64_t action_flags,
5025                                 const struct rte_flow_action *action,
5026                                 const struct rte_flow_attr *attr,
5027                                 struct rte_flow_error *error)
5028 {
5029         const struct rte_flow_action_port_id *port_id;
5030         const struct rte_flow_action_ethdev *ethdev;
5031         struct mlx5_priv *act_priv;
5032         struct mlx5_priv *dev_priv;
5033         uint16_t port;
5034
5035         if (!attr->transfer)
5036                 return rte_flow_error_set(error, ENOTSUP,
5037                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5038                                           NULL,
5039                                           "port action is valid in transfer"
5040                                           " mode only");
5041         if (!action || !action->conf)
5042                 return rte_flow_error_set(error, ENOTSUP,
5043                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5044                                           NULL,
5045                                           "port action parameters must be"
5046                                           " specified");
5047         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5048                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5049                 return rte_flow_error_set(error, EINVAL,
5050                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can have only one fate action in"
5052                                           " a flow");
5053         dev_priv = mlx5_dev_to_eswitch_info(dev);
5054         if (!dev_priv)
5055                 return rte_flow_error_set(error, rte_errno,
5056                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5057                                           NULL,
5058                                           "failed to obtain E-Switch info");
5059         switch (action->type) {
5060         case RTE_FLOW_ACTION_TYPE_PORT_ID:
5061                 port_id = action->conf;
5062                 port = port_id->original ? dev->data->port_id : port_id->id;
5063                 break;
5064         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5065                 ethdev = action->conf;
5066                 port = ethdev->port_id;
5067                 break;
5068         default:
5069                 MLX5_ASSERT(false);
5070                 return rte_flow_error_set
5071                                 (error, EINVAL,
5072                                  RTE_FLOW_ERROR_TYPE_ACTION, action,
5073                                  "unknown E-Switch action");
5074         }
5075         act_priv = mlx5_port_to_eswitch_info(port, false);
5076         if (!act_priv)
5077                 return rte_flow_error_set
5078                                 (error, rte_errno,
5079                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, action->conf,
5080                                  "failed to obtain E-Switch port id for port");
5081         if (act_priv->domain_id != dev_priv->domain_id)
5082                 return rte_flow_error_set
5083                                 (error, EINVAL,
5084                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5085                                  "port does not belong to"
5086                                  " E-Switch being configured");
5087         return 0;
5088 }
5089
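/*
 * Usage sketch: the two configuration layouts handled above. Both are
 * valid only on transfer rules, and the referenced port must belong to
 * the same E-Switch domain as the rule's port.
 */
static __rte_unused const struct rte_flow_action_port_id
flow_dv_example_port_id_conf = {
	.id = 1, /* destination DPDK port id */
};
static __rte_unused const struct rte_flow_action_ethdev
flow_dv_example_represented_port_conf = {
	.port_id = 1, /* ethdev backing the represented port */
};
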
5090 /**
5091  * Get the maximum number of modify header actions.
5092  *
5093  * @param dev
5094  *   Pointer to rte_eth_dev structure.
5095  * @param root
5096  *   Whether action is on root table.
5097  *
5098  * @return
5099  *   Max number of modify header actions device can support.
5100  */
5101 static inline unsigned int
5102 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5103                               bool root)
5104 {
5105         /*
5106          * There's no way to directly query the max capacity from FW.
5107          * The maximal value on root table should be assumed to be supported.
5108          */
5109         if (!root)
5110                 return MLX5_MAX_MODIFY_NUM;
5111         else
5112                 return MLX5_ROOT_TBL_MODIFY_NUM;
5113 }
5114
5115 /**
5116  * Validate the meter action.
5117  *
5118  * @param[in] dev
5119  *   Pointer to rte_eth_dev structure.
5120  * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
5122  * @param[in] item_flags
5123  *   Holds the items detected.
5124  * @param[in] action
5125  *   Pointer to the meter action.
5126  * @param[in] attr
5127  *   Attributes of flow that includes this action.
5128  * @param[in] port_id_item
5129  *   Pointer to item indicating port id.
5130  * @param[out] error
5131  *   Pointer to error structure.
5132  *
5133  * @return
5134  *   0 on success, a negative errno value otherwise and rte_errno is set.
5135  */
5136 static int
5137 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5138                                 uint64_t action_flags, uint64_t item_flags,
5139                                 const struct rte_flow_action *action,
5140                                 const struct rte_flow_attr *attr,
5141                                 const struct rte_flow_item *port_id_item,
5142                                 bool *def_policy,
5143                                 struct rte_flow_error *error)
5144 {
5145         struct mlx5_priv *priv = dev->data->dev_private;
5146         const struct rte_flow_action_meter *am = action->conf;
5147         struct mlx5_flow_meter_info *fm;
5148         struct mlx5_flow_meter_policy *mtr_policy;
5149         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5150
5151         if (!am)
5152                 return rte_flow_error_set(error, EINVAL,
5153                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5154                                           "meter action conf is NULL");
5155
5156         if (action_flags & MLX5_FLOW_ACTION_METER)
5157                 return rte_flow_error_set(error, ENOTSUP,
5158                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter chaining is not supported");
5160         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5161                 return rte_flow_error_set(error, ENOTSUP,
5162                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter with jump is not supported");
5164         if (!priv->mtr_en)
5165                 return rte_flow_error_set(error, ENOTSUP,
5166                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5167                                           NULL,
5168                                           "meter action not supported");
5169         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5170         if (!fm)
5171                 return rte_flow_error_set(error, EINVAL,
5172                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5173                                           "Meter not found");
	/* An ASO meter can always be shared by different domains. */
5175         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5176             !(fm->transfer == attr->transfer ||
5177               (!fm->ingress && !attr->ingress && attr->egress) ||
5178               (!fm->egress && !attr->egress && attr->ingress)))
5179                 return rte_flow_error_set(error, EINVAL,
5180                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			"Flow attributes domain is either invalid "
			"or conflicts with the current "
			"meter attributes");
5184         if (fm->def_policy) {
5185                 if (!((attr->transfer &&
5186                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5187                         (attr->egress &&
5188                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5189                         (attr->ingress &&
5190                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5191                         return rte_flow_error_set(error, EINVAL,
5192                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Flow attributes domain "
					  "conflicts with the current "
					  "meter domain attributes");
5196                 *def_policy = true;
5197         } else {
5198                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5199                                                 fm->policy_id, NULL);
5200                 if (!mtr_policy)
5201                         return rte_flow_error_set(error, EINVAL,
5202                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Invalid policy id for meter");
5204                 if (!((attr->transfer && mtr_policy->transfer) ||
5205                         (attr->egress && mtr_policy->egress) ||
5206                         (attr->ingress && mtr_policy->ingress)))
5207                         return rte_flow_error_set(error, EINVAL,
5208                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Flow attributes domain "
					  "conflicts with the current "
					  "meter domain attributes");
5212                 if (attr->transfer && mtr_policy->dev) {
			/*
			 * When the policy has a fate action of port_id,
			 * the flow should have the same src port as the
			 * policy.
			 */
5217                         struct mlx5_priv *policy_port_priv =
5218                                         mtr_policy->dev->data->dev_private;
5219                         int32_t flow_src_port = priv->representor_id;
5220
5221                         if (port_id_item) {
5222                                 const struct rte_flow_item_port_id *spec =
5223                                                         port_id_item->spec;
5224                                 struct mlx5_priv *port_priv =
5225                                         mlx5_port_to_eswitch_info(spec->id,
5226                                                                   false);
5227                                 if (!port_priv)
5228                                         return rte_flow_error_set(error,
5229                                                 rte_errno,
5230                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5231                                                 spec,
5232                                                 "Failed to get port info.");
5233                                 flow_src_port = port_priv->representor_id;
5234                         }
5235                         if (flow_src_port != policy_port_priv->representor_id)
5236                                 return rte_flow_error_set(error,
5237                                                 rte_errno,
5238                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5239                                                 NULL,
5240                                                 "Flow and meter policy "
5241                                                 "have different src port.");
5242                 } else if (mtr_policy->is_rss) {
5243                         struct mlx5_flow_meter_policy *fp;
5244                         struct mlx5_meter_policy_action_container *acg;
5245                         struct mlx5_meter_policy_action_container *acy;
5246                         const struct rte_flow_action *rss_act;
5247                         int ret;
5248
5249                         fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
5250                                                                 mtr_policy);
5251                         if (fp == NULL)
5252                                 return rte_flow_error_set(error, EINVAL,
5253                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5254                                                   "Unable to get the final "
5255                                                   "policy in the hierarchy");
5256                         acg = &fp->act_cnt[RTE_COLOR_GREEN];
5257                         acy = &fp->act_cnt[RTE_COLOR_YELLOW];
5258                         MLX5_ASSERT(acg->fate_action ==
5259                                     MLX5_FLOW_FATE_SHARED_RSS ||
5260                                     acy->fate_action ==
5261                                     MLX5_FLOW_FATE_SHARED_RSS);
5262                         if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS)
5263                                 rss_act = acg->rss;
5264                         else
5265                                 rss_act = acy->rss;
5266                         ret = mlx5_flow_validate_action_rss(rss_act,
5267                                         action_flags, dev, attr,
5268                                         item_flags, error);
5269                         if (ret)
5270                                 return ret;
5271                 }
5272                 *def_policy = false;
5273         }
5274         return 0;
5275 }
5276
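/*
 * Usage sketch: a METER configuration referencing a meter previously
 * created through the rte_mtr API; the validation above resolves the id
 * and cross-checks the flow and meter/policy domains.
 */
static __rte_unused const struct rte_flow_action_meter
flow_dv_example_meter_conf = {
	.mtr_id = 1, /* id given earlier to rte_mtr_create() */
};
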
5277 /**
5278  * Validate the age action.
5279  *
5280  * @param[in] action_flags
5281  *   Holds the actions detected until now.
5282  * @param[in] action
5283  *   Pointer to the age action.
5284  * @param[in] dev
5285  *   Pointer to the Ethernet device structure.
5286  * @param[out] error
5287  *   Pointer to error structure.
5288  *
5289  * @return
5290  *   0 on success, a negative errno value otherwise and rte_errno is set.
5291  */
5292 static int
5293 flow_dv_validate_action_age(uint64_t action_flags,
5294                             const struct rte_flow_action *action,
5295                             struct rte_eth_dev *dev,
5296                             struct rte_flow_error *error)
5297 {
5298         struct mlx5_priv *priv = dev->data->dev_private;
5299         const struct rte_flow_action_age *age = action->conf;
5300
5301         if (!priv->sh->devx || (priv->sh->cmng.counter_fallback &&
5302             !priv->sh->aso_age_mng))
5303                 return rte_flow_error_set(error, ENOTSUP,
5304                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5305                                           NULL,
5306                                           "age action not supported");
5307         if (!(action->conf))
5308                 return rte_flow_error_set(error, EINVAL,
5309                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5310                                           "configuration cannot be null");
5311         if (!(age->timeout))
5312                 return rte_flow_error_set(error, EINVAL,
5313                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5314                                           "invalid timeout value 0");
5315         if (action_flags & MLX5_FLOW_ACTION_AGE)
5316                 return rte_flow_error_set(error, EINVAL,
5317                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5318                                           "duplicate age actions set");
5319         return 0;
5320 }
5321
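/*
 * Usage sketch: an AGE configuration passing the checks above - non-NULL
 * conf and a non-zero timeout (in seconds).
 */
static __rte_unused const struct rte_flow_action_age
flow_dv_example_age_conf = {
	.timeout = 10,   /* seconds; zero is rejected above */
	.context = NULL, /* optional user context reported on aging */
};
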
5322 /**
5323  * Validate the modify-header IPv4 DSCP actions.
5324  *
5325  * @param[in] action_flags
5326  *   Holds the actions detected until now.
5327  * @param[in] action
5328  *   Pointer to the modify action.
5329  * @param[in] item_flags
5330  *   Holds the items detected.
5331  * @param[out] error
5332  *   Pointer to error structure.
5333  *
5334  * @return
5335  *   0 on success, a negative errno value otherwise and rte_errno is set.
5336  */
5337 static int
5338 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5339                                          const struct rte_flow_action *action,
5340                                          const uint64_t item_flags,
5341                                          struct rte_flow_error *error)
5342 {
5343         int ret = 0;
5344
5345         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5346         if (!ret) {
5347                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5348                         return rte_flow_error_set(error, EINVAL,
5349                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5350                                                   NULL,
5351                                                   "no ipv4 item in pattern");
5352         }
5353         return ret;
5354 }
5355
5356 /**
5357  * Validate the modify-header IPv6 DSCP actions.
5358  *
5359  * @param[in] action_flags
5360  *   Holds the actions detected until now.
5361  * @param[in] action
5362  *   Pointer to the modify action.
5363  * @param[in] item_flags
5364  *   Holds the items detected.
5365  * @param[out] error
5366  *   Pointer to error structure.
5367  *
5368  * @return
5369  *   0 on success, a negative errno value otherwise and rte_errno is set.
5370  */
5371 static int
5372 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5373                                          const struct rte_flow_action *action,
5374                                          const uint64_t item_flags,
5375                                          struct rte_flow_error *error)
5376 {
5377         int ret = 0;
5378
5379         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5380         if (!ret) {
5381                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5382                         return rte_flow_error_set(error, EINVAL,
5383                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5384                                                   NULL,
5385                                                   "no ipv6 item in pattern");
5386         }
5387         return ret;
5388 }
5389
5390 int
5391 flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
5392                         struct mlx5_list_entry *entry, void *cb_ctx)
5393 {
5394         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5395         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5396         struct mlx5_flow_dv_modify_hdr_resource *resource =
5397                                   container_of(entry, typeof(*resource), entry);
5398         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5399
5400         key_len += ref->actions_num * sizeof(ref->actions[0]);
5401         return ref->actions_num != resource->actions_num ||
5402                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5403 }
5404
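/*
 * A minimal sketch of the key layout compared above: the lookup key spans
 * from the ft_type member to the end of the fixed structure, plus the
 * variable-length array of modification commands.
 */
static __rte_unused uint32_t
flow_dv_example_modify_key_len
			(const struct mlx5_flow_dv_modify_hdr_resource *ref)
{
	/* Fixed tail of the structure starting at ft_type... */
	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);

	/* ...plus one mlx5_modification_cmd per action. */
	return key_len + ref->actions_num * sizeof(ref->actions[0]);
}
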
5405 static struct mlx5_indexed_pool *
5406 flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
5407 {
5408         struct mlx5_indexed_pool *ipool = __atomic_load_n
5409                                      (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
5410
5411         if (!ipool) {
5412                 struct mlx5_indexed_pool *expected = NULL;
5413                 struct mlx5_indexed_pool_config cfg =
5414                     (struct mlx5_indexed_pool_config) {
5415                        .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
5416                                                                    (index + 1) *
5417                                            sizeof(struct mlx5_modification_cmd),
5418                        .trunk_size = 64,
5419                        .grow_trunk = 3,
5420                        .grow_shift = 2,
5421                        .need_lock = 1,
5422                        .release_mem_en = !!sh->reclaim_mode,
5423                        .per_core_cache = sh->reclaim_mode ? 0 : (1 << 16),
5424                        .malloc = mlx5_malloc,
5425                        .free = mlx5_free,
5426                        .type = "mlx5_modify_action_resource",
5427                 };
5428
5429                 cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
5430                 ipool = mlx5_ipool_create(&cfg);
5431                 if (!ipool)
5432                         return NULL;
5433                 if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
5434                                                  &expected, ipool, false,
5435                                                  __ATOMIC_SEQ_CST,
5436                                                  __ATOMIC_SEQ_CST)) {
5437                         mlx5_ipool_destroy(ipool);
5438                         ipool = __atomic_load_n(&sh->mdh_ipools[index],
5439                                                 __ATOMIC_SEQ_CST);
5440                 }
5441         }
5442         return ipool;
5443 }
5444
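/*
 * Pattern sketch: flow_dv_modify_ipool_get() above is a lock-free
 * "create once, publish" singleton. In generic form, with hypothetical
 * make_obj()/free_obj() callbacks standing in for the ipool calls, the
 * loser of the CAS race destroys its local object and adopts the winner's:
 */
static __rte_unused void *
flow_dv_example_publish_once(void **slot, void *(*make_obj)(void),
			     void (*free_obj)(void *))
{
	void *obj = __atomic_load_n(slot, __ATOMIC_SEQ_CST);

	if (!obj) {
		void *expected = NULL;

		obj = make_obj();
		if (obj && !__atomic_compare_exchange_n(slot, &expected, obj,
							false,
							__ATOMIC_SEQ_CST,
							__ATOMIC_SEQ_CST)) {
			/* Lost the race: drop ours, adopt the winner's. */
			free_obj(obj);
			obj = __atomic_load_n(slot, __ATOMIC_SEQ_CST);
		}
	}
	return obj;
}
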
5445 struct mlx5_list_entry *
5446 flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
5447 {
5448         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5449         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5450         struct mlx5dv_dr_domain *ns;
5451         struct mlx5_flow_dv_modify_hdr_resource *entry;
5452         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5453         struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
5454                                                           ref->actions_num - 1);
5455         int ret;
5456         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5457         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5458         uint32_t idx;
5459
5460         if (unlikely(!ipool)) {
5461                 rte_flow_error_set(ctx->error, ENOMEM,
5462                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5463                                    NULL, "cannot allocate modify ipool");
5464                 return NULL;
5465         }
5466         entry = mlx5_ipool_zmalloc(ipool, &idx);
5467         if (!entry) {
5468                 rte_flow_error_set(ctx->error, ENOMEM,
5469                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5470                                    "cannot allocate resource memory");
5471                 return NULL;
5472         }
5473         rte_memcpy(&entry->ft_type,
5474                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5475                    key_len + data_len);
5476         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5477                 ns = sh->fdb_domain;
5478         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5479                 ns = sh->tx_domain;
5480         else
5481                 ns = sh->rx_domain;
5482         ret = mlx5_flow_os_create_flow_action_modify_header
5483                                         (sh->cdev->ctx, ns, entry,
5484                                          data_len, &entry->action);
5485         if (ret) {
5486                 mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
5487                 rte_flow_error_set(ctx->error, ENOMEM,
5488                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5489                                    NULL, "cannot create modification action");
5490                 return NULL;
5491         }
5492         entry->idx = idx;
5493         return &entry->entry;
5494 }
5495
5496 struct mlx5_list_entry *
5497 flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5498                         void *cb_ctx)
5499 {
5500         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5501         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5502         struct mlx5_flow_dv_modify_hdr_resource *entry;
5503         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5504         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5505         uint32_t idx;
5506
5507         entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
5508                                   &idx);
5509         if (!entry) {
5510                 rte_flow_error_set(ctx->error, ENOMEM,
5511                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5512                                    "cannot allocate resource memory");
5513                 return NULL;
5514         }
5515         memcpy(entry, oentry, sizeof(*entry) + data_len);
5516         entry->idx = idx;
5517         return &entry->entry;
5518 }
5519
5520 void
5521 flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5522 {
5523         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5524         struct mlx5_flow_dv_modify_hdr_resource *res =
5525                 container_of(entry, typeof(*res), entry);
5526
5527         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
5528 }
5529
5530 /**
5531  * Validate the sample action.
5532  *
5533  * @param[in, out] action_flags
5534  *   Holds the actions detected until now.
5535  * @param[in] action
5536  *   Pointer to the sample action.
5537  * @param[in] dev
5538  *   Pointer to the Ethernet device structure.
5539  * @param[in] attr
5540  *   Attributes of flow that includes this action.
5541  * @param[in] item_flags
5542  *   Holds the items detected.
5543  * @param[in] rss
5544  *   Pointer to the RSS action.
5545  * @param[out] sample_rss
5546  *   Pointer to the RSS action in sample action list.
5547  * @param[out] count
5548  *   Pointer to the COUNT action in sample action list.
5549  * @param[out] fdb_mirror_limit
5550  *   Pointer to the FDB mirror limitation flag.
5551  * @param[out] error
5552  *   Pointer to error structure.
5553  *
5554  * @return
5555  *   0 on success, a negative errno value otherwise and rte_errno is set.
5556  */
5557 static int
5558 flow_dv_validate_action_sample(uint64_t *action_flags,
5559                                const struct rte_flow_action *action,
5560                                struct rte_eth_dev *dev,
5561                                const struct rte_flow_attr *attr,
5562                                uint64_t item_flags,
5563                                const struct rte_flow_action_rss *rss,
5564                                const struct rte_flow_action_rss **sample_rss,
5565                                const struct rte_flow_action_count **count,
5566                                int *fdb_mirror_limit,
5567                                struct rte_flow_error *error)
5568 {
5569         struct mlx5_priv *priv = dev->data->dev_private;
5570         struct mlx5_dev_config *dev_conf = &priv->config;
5571         const struct rte_flow_action_sample *sample = action->conf;
5572         const struct rte_flow_action *act;
5573         uint64_t sub_action_flags = 0;
5574         uint16_t queue_index = 0xFFFF;
5575         int actions_n = 0;
5576         int ret;
5577
5578         if (!sample)
5579                 return rte_flow_error_set(error, EINVAL,
5580                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5581                                           "configuration cannot be NULL");
5582         if (sample->ratio == 0)
5583                 return rte_flow_error_set(error, EINVAL,
5584                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "ratio value must be at least 1");
5586         if (!priv->sh->devx || (sample->ratio > 0 && !priv->sampler_en))
5587                 return rte_flow_error_set(error, ENOTSUP,
5588                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5589                                           NULL,
5590                                           "sample action not supported");
5591         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5592                 return rte_flow_error_set(error, EINVAL,
5593                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5594                                           "Multiple sample actions not "
5595                                           "supported");
5596         if (*action_flags & MLX5_FLOW_ACTION_METER)
5597                 return rte_flow_error_set(error, EINVAL,
5598                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5599                                           "wrong action order, meter should "
5600                                           "be after sample action");
5601         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5602                 return rte_flow_error_set(error, EINVAL,
5603                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5604                                           "wrong action order, jump should "
5605                                           "be after sample action");
5606         if (*action_flags & MLX5_FLOW_ACTION_CT)
5607                 return rte_flow_error_set(error, EINVAL,
5608                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5609                                           "Sample after CT not supported");
5610         act = sample->actions;
5611         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5612                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5613                         return rte_flow_error_set(error, ENOTSUP,
5614                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5615                                                   act, "too many actions");
5616                 switch (act->type) {
5617                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5618                         ret = mlx5_flow_validate_action_queue(act,
5619                                                               sub_action_flags,
5620                                                               dev,
5621                                                               attr, error);
5622                         if (ret < 0)
5623                                 return ret;
5624                         queue_index = ((const struct rte_flow_action_queue *)
5625                                                         (act->conf))->index;
5626                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5627                         ++actions_n;
5628                         break;
5629                 case RTE_FLOW_ACTION_TYPE_RSS:
5630                         *sample_rss = act->conf;
5631                         ret = mlx5_flow_validate_action_rss(act,
5632                                                             sub_action_flags,
5633                                                             dev, attr,
5634                                                             item_flags,
5635                                                             error);
5636                         if (ret < 0)
5637                                 return ret;
5638                         if (rss && *sample_rss &&
5639                             ((*sample_rss)->level != rss->level ||
5640                             (*sample_rss)->types != rss->types))
5641                                 return rte_flow_error_set(error, ENOTSUP,
5642                                         RTE_FLOW_ERROR_TYPE_ACTION,
5643                                         NULL,
					"Can't use different RSS types "
					"or levels in the same flow");
5646                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5647                                 queue_index = (*sample_rss)->queue[0];
5648                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5649                         ++actions_n;
5650                         break;
5651                 case RTE_FLOW_ACTION_TYPE_MARK:
5652                         ret = flow_dv_validate_action_mark(dev, act,
5653                                                            sub_action_flags,
5654                                                            attr, error);
5655                         if (ret < 0)
5656                                 return ret;
5657                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5658                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5659                                                 MLX5_FLOW_ACTION_MARK_EXT;
5660                         else
5661                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5662                         ++actions_n;
5663                         break;
5664                 case RTE_FLOW_ACTION_TYPE_COUNT:
5665                         ret = flow_dv_validate_action_count
5666                                 (dev, false, *action_flags | sub_action_flags,
5667                                  error);
5668                         if (ret < 0)
5669                                 return ret;
5670                         *count = act->conf;
5671                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5672                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5673                         ++actions_n;
5674                         break;
5675                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5676                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5677                         ret = flow_dv_validate_action_port_id(dev,
5678                                                               sub_action_flags,
5679                                                               act,
5680                                                               attr,
5681                                                               error);
5682                         if (ret)
5683                                 return ret;
5684                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5685                         ++actions_n;
5686                         break;
5687                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5688                         ret = flow_dv_validate_action_raw_encap_decap
5689                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5690                                  &actions_n, action, item_flags, error);
5691                         if (ret < 0)
5692                                 return ret;
5693                         ++actions_n;
5694                         break;
5695                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5696                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5697                         ret = flow_dv_validate_action_l2_encap(dev,
5698                                                                sub_action_flags,
5699                                                                act, attr,
5700                                                                error);
5701                         if (ret < 0)
5702                                 return ret;
5703                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5704                         ++actions_n;
5705                         break;
5706                 default:
5707                         return rte_flow_error_set(error, ENOTSUP,
5708                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5709                                                   NULL,
5710                                                   "Doesn't support optional "
5711                                                   "action");
5712                 }
5713         }
5714         if (attr->ingress && !attr->transfer) {
5715                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5716                                           MLX5_FLOW_ACTION_RSS)))
5717                         return rte_flow_error_set(error, EINVAL,
5718                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5719                                                   NULL,
						  "Ingress must have a dest "
						  "QUEUE for Sample");
5722         } else if (attr->egress && !attr->transfer) {
5723                 return rte_flow_error_set(error, ENOTSUP,
5724                                           RTE_FLOW_ERROR_TYPE_ACTION,
5725                                           NULL,
					  "Sample only supports Ingress "
					  "or E-Switch");
5728         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5729                 MLX5_ASSERT(attr->transfer);
5730                 if (sample->ratio > 1)
5731                         return rte_flow_error_set(error, ENOTSUP,
5732                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5733                                                   NULL,
5734                                                   "E-Switch doesn't support "
5735                                                   "any optional action "
5736                                                   "for sampling");
5737                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5738                         return rte_flow_error_set(error, ENOTSUP,
5739                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5740                                                   NULL,
5741                                                   "unsupported action QUEUE");
5742                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5743                         return rte_flow_error_set(error, ENOTSUP,
5744                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5745                                                   NULL,
						  "unsupported action RSS");
5747                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5748                         return rte_flow_error_set(error, EINVAL,
5749                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5750                                                   NULL,
						  "E-Switch must have a dest "
5752                                                   "port for mirroring");
5753                 if (!priv->config.hca_attr.reg_c_preserve &&
5754                      priv->representor_id != UINT16_MAX)
5755                         *fdb_mirror_limit = 1;
5756         }
	/* Continue validation for Xcap actions. */
5758         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5759             (queue_index == 0xFFFF ||
5760              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5761                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5762                      MLX5_FLOW_XCAP_ACTIONS)
5763                         return rte_flow_error_set(error, ENOTSUP,
5764                                                   RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "encap and decap "
						  "combination isn't "
						  "supported");
5768                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5769                                                         MLX5_FLOW_ACTION_ENCAP))
5770                         return rte_flow_error_set(error, ENOTSUP,
5771                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5772                                                   NULL, "encap is not supported"
5773                                                   " for ingress traffic");
5774         }
5775         return 0;
5776 }
5777
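/*
 * Usage sketch: a NIC-ingress SAMPLE configuration the validation above
 * accepts - non-zero ratio and a QUEUE fate in the sub-action list
 * (hypothetical queue and ratio values).
 */
static __rte_unused const struct rte_flow_action_queue
flow_dv_example_sample_queue = { .index = 0 };
static __rte_unused const struct rte_flow_action
flow_dv_example_sample_sub_actions[] = {
	{
		.type = RTE_FLOW_ACTION_TYPE_QUEUE,
		.conf = &flow_dv_example_sample_queue,
	},
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
static __rte_unused const struct rte_flow_action_sample
flow_dv_example_sample_conf = {
	.ratio = 2, /* sample one of every two packets */
	.actions = flow_dv_example_sample_sub_actions,
};
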
5778 /**
5779  * Find existing modify-header resource or create and register a new one.
5780  *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise -errno and rte_errno is set.
5792  */
5793 static int
5794 flow_dv_modify_hdr_resource_register
5795                         (struct rte_eth_dev *dev,
5796                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5797                          struct mlx5_flow *dev_flow,
5798                          struct rte_flow_error *error)
5799 {
5800         struct mlx5_priv *priv = dev->data->dev_private;
5801         struct mlx5_dev_ctx_shared *sh = priv->sh;
5802         uint32_t key_len = sizeof(*resource) -
5803                            offsetof(typeof(*resource), ft_type) +
5804                            resource->actions_num * sizeof(resource->actions[0]);
5805         struct mlx5_list_entry *entry;
5806         struct mlx5_flow_cb_ctx ctx = {
5807                 .error = error,
5808                 .data = resource,
5809         };
5810         struct mlx5_hlist *modify_cmds;
5811         uint64_t key64;
5812
5813         modify_cmds = flow_dv_hlist_prepare(sh, &sh->modify_cmds,
5814                                 "hdr_modify",
5815                                 MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
5816                                 true, false, sh,
5817                                 flow_dv_modify_create_cb,
5818                                 flow_dv_modify_match_cb,
5819                                 flow_dv_modify_remove_cb,
5820                                 flow_dv_modify_clone_cb,
5821                                 flow_dv_modify_clone_free_cb);
5822         if (unlikely(!modify_cmds))
5823                 return -rte_errno;
5824         resource->root = !dev_flow->dv.group;
5825         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5826                                                                 resource->root))
5827                 return rte_flow_error_set(error, EOVERFLOW,
5828                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5829                                           "too many modify header items");
5830         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5831         entry = mlx5_hlist_register(modify_cmds, key64, &ctx);
5832         if (!entry)
5833                 return -rte_errno;
5834         resource = container_of(entry, typeof(*resource), entry);
5835         dev_flow->handle->dvh.modify_hdr = resource;
5836         return 0;
5837 }
5838
5839 /**
5840  * Get DV flow counter by index.
5841  *
5842  * @param[in] dev
5843  *   Pointer to the Ethernet device structure.
5844  * @param[in] idx
5845  *   mlx5 flow counter index in the container.
5846  * @param[out] ppool
5847  *   mlx5 flow counter pool in the container.
5848  *
5849  * @return
5850  *   Pointer to the counter, NULL otherwise.
5851  */
5852 static struct mlx5_flow_counter *
5853 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5854                            uint32_t idx,
5855                            struct mlx5_flow_counter_pool **ppool)
5856 {
5857         struct mlx5_priv *priv = dev->data->dev_private;
5858         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5859         struct mlx5_flow_counter_pool *pool;
5860
5861         /* Decrease to original index and clear shared bit. */
5862         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5863         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5864         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5865         MLX5_ASSERT(pool);
5866         if (ppool)
5867                 *ppool = pool;
5868         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5869 }
5870
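/*
 * Index arithmetic sketch for the lookup above: counter indices are
 * 1-based with an optional shared bit, so after stripping both, and
 * assuming MLX5_COUNTERS_PER_POOL == 512:
 *
 *   idx 1   -> pools[0], slot 0
 *   idx 512 -> pools[0], slot 511
 *   idx 513 -> pools[1], slot 0
 */
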
5871 /**
5872  * Check the devx counter belongs to the pool.
5873  *
5874  * @param[in] pool
5875  *   Pointer to the counter pool.
5876  * @param[in] id
5877  *   The counter devx ID.
5878  *
5879  * @return
5880  *   True if counter belongs to the pool, false otherwise.
5881  */
5882 static bool
5883 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5884 {
5885         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5886                    MLX5_COUNTERS_PER_POOL;
5887
5888         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5889                 return true;
5890         return false;
5891 }
5892
5893 /**
5894  * Get a pool by devx counter ID.
5895  *
5896  * @param[in] cmng
5897  *   Pointer to the counter management.
5898  * @param[in] id
5899  *   The counter devx ID.
5900  *
5901  * @return
 *   The counter pool pointer if it exists, NULL otherwise.
5903  */
5904 static struct mlx5_flow_counter_pool *
5905 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5906 {
5907         uint32_t i;
5908         struct mlx5_flow_counter_pool *pool = NULL;
5909
5910         rte_spinlock_lock(&cmng->pool_update_sl);
5911         /* Check last used pool. */
5912         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5913             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5914                 pool = cmng->pools[cmng->last_pool_idx];
5915                 goto out;
5916         }
5917         /* ID out of range means no suitable pool in the container. */
5918         if (id > cmng->max_id || id < cmng->min_id)
5919                 goto out;
	/*
	 * Search the container from the end, since counter IDs are mostly
	 * sequentially increasing, so the last pool is usually the needed
	 * one.
	 */
5925         i = cmng->n_valid;
5926         while (i--) {
5927                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5928
5929                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5930                         pool = pool_tmp;
5931                         break;
5932                 }
5933         }
5934 out:
5935         rte_spinlock_unlock(&cmng->pool_update_sl);
5936         return pool;
5937 }
5938
5939 /**
5940  * Resize a counter container.
5941  *
5942  * @param[in] dev
5943  *   Pointer to the Ethernet device structure.
5944  *
5945  * @return
5946  *   0 on success, otherwise negative errno value and rte_errno is set.
5947  */
5948 static int
5949 flow_dv_container_resize(struct rte_eth_dev *dev)
5950 {
5951         struct mlx5_priv *priv = dev->data->dev_private;
5952         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5953         void *old_pools = cmng->pools;
5954         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5955         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5956         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5957
5958         if (!pools) {
5959                 rte_errno = ENOMEM;
5960                 return -ENOMEM;
5961         }
5962         if (old_pools)
5963                 memcpy(pools, old_pools, cmng->n *
5964                                        sizeof(struct mlx5_flow_counter_pool *));
5965         cmng->n = resize;
5966         cmng->pools = pools;
5967         if (old_pools)
5968                 mlx5_free(old_pools);
5969         return 0;
5970 }
5971
5972 /**
5973  * Query a devx flow counter.
5974  *
5975  * @param[in] dev
5976  *   Pointer to the Ethernet device structure.
5977  * @param[in] counter
5978  *   Index to the flow counter.
5979  * @param[out] pkts
5980  *   The statistics value of packets.
5981  * @param[out] bytes
5982  *   The statistics value of bytes.
5983  *
5984  * @return
5985  *   0 on success, otherwise a negative errno value and rte_errno is set.
5986  */
5987 static inline int
5988 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5989                      uint64_t *bytes)
5990 {
5991         struct mlx5_priv *priv = dev->data->dev_private;
5992         struct mlx5_flow_counter_pool *pool = NULL;
5993         struct mlx5_flow_counter *cnt;
5994         int offset;
5995
5996         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5997         MLX5_ASSERT(pool);
5998         if (priv->sh->cmng.counter_fallback)
5999                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
6000                                         0, pkts, bytes, 0, NULL, NULL, 0);
6001         rte_spinlock_lock(&pool->sl);
6002         if (!pool->raw) {
6003                 *pkts = 0;
6004                 *bytes = 0;
6005         } else {
6006                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
6007                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
6008                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
6009         }
6010         rte_spinlock_unlock(&pool->sl);
6011         return 0;
6012 }
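
/*
 * Illustrative sketch: the counter index seen by callers packs the pool
 * index and the in-pool offset, with a +1 bias so that 0 can mean "no
 * counter" (compare MLX5_MAKE_CNT_IDX and the (counter - 1) %
 * MLX5_COUNTERS_PER_POOL arithmetic used later in this file). The
 * encode/decode pair, assuming that layout:
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
static inline uint32_t
sketch_make_cnt_idx(uint32_t pool_idx, uint32_t offset)
{
	return pool_idx * MLX5_COUNTERS_PER_POOL + offset + 1;
}

static inline void
sketch_split_cnt_idx(uint32_t idx, uint32_t *pool_idx, uint32_t *offset)
{
	idx -= 1; /* undo the "0 is invalid" bias */
	*pool_idx = idx / MLX5_COUNTERS_PER_POOL;
	*offset = idx % MLX5_COUNTERS_PER_POOL;
}
#endif /* MLX5_FLOW_DV_DOC_SKETCH */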
6013
6014 /**
6015  * Create and initialize a new counter pool.
6016  *
6017  * @param[in] dev
6018  *   Pointer to the Ethernet device structure.
6019  * @param[in] dcs
6020  *   The devX counter handle.
6021  * @param[in] age
6022  *   Whether the pool is for counters that were allocated
6023  *   for aging.
6024  *
6025  * @return
6026  *   The counter pool pointer on success, NULL otherwise and
6027  *   rte_errno is set.
6028  */
6029 static struct mlx5_flow_counter_pool *
6030 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
6031                     uint32_t age)
6032 {
6033         struct mlx5_priv *priv = dev->data->dev_private;
6034         struct mlx5_flow_counter_pool *pool;
6035         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6036         bool fallback = priv->sh->cmng.counter_fallback;
6037         uint32_t size = sizeof(*pool);
6038
6039         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
6040         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
6041         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
6042         if (!pool) {
6043                 rte_errno = ENOMEM;
6044                 return NULL;
6045         }
6046         pool->raw = NULL;
6047         pool->is_aged = !!age;
6048         pool->query_gen = 0;
6049         pool->min_dcs = dcs;
6050         rte_spinlock_init(&pool->sl);
6051         rte_spinlock_init(&pool->csl);
6052         TAILQ_INIT(&pool->counters[0]);
6053         TAILQ_INIT(&pool->counters[1]);
6054         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
6055         rte_spinlock_lock(&cmng->pool_update_sl);
6056         pool->index = cmng->n_valid;
6057         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
6058                 mlx5_free(pool);
6059                 rte_spinlock_unlock(&cmng->pool_update_sl);
6060                 return NULL;
6061         }
6062         cmng->pools[pool->index] = pool;
6063         cmng->n_valid++;
6064         if (unlikely(fallback)) {
6065                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
6066
6067                 if (base < cmng->min_id)
6068                         cmng->min_id = base;
6069                 if (base > cmng->max_id)
6070                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
6071                 cmng->last_pool_idx = pool->index;
6072         }
6073         rte_spinlock_unlock(&cmng->pool_update_sl);
6074         return pool;
6075 }
6076
6077 /**
6078  * Prepare a new counter and/or a new counter pool.
6079  *
6080  * @param[in] dev
6081  *   Pointer to the Ethernet device structure.
6082  * @param[out] cnt_free
6083  *   Where to put the pointer of a new counter.
6084  * @param[in] age
6085  *   Whether the pool is for counters that were allocated for aging.
6086  *
6087  * @return
6088  *   The counter pool pointer and @p cnt_free is set on success,
6089  *   NULL otherwise and rte_errno is set.
6090  */
6091 static struct mlx5_flow_counter_pool *
6092 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
6093                              struct mlx5_flow_counter **cnt_free,
6094                              uint32_t age)
6095 {
6096         struct mlx5_priv *priv = dev->data->dev_private;
6097         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6098         struct mlx5_flow_counter_pool *pool;
6099         struct mlx5_counters tmp_tq;
6100         struct mlx5_devx_obj *dcs = NULL;
6101         struct mlx5_flow_counter *cnt;
6102         enum mlx5_counter_type cnt_type =
6103                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6104         bool fallback = priv->sh->cmng.counter_fallback;
6105         uint32_t i;
6106
6107         if (fallback) {
6108                 /* bulk_bitmap must be 0 for single counter allocation. */
6109                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0);
6110                 if (!dcs)
6111                         return NULL;
6112                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
6113                 if (!pool) {
6114                         pool = flow_dv_pool_create(dev, dcs, age);
6115                         if (!pool) {
6116                                 mlx5_devx_cmd_destroy(dcs);
6117                                 return NULL;
6118                         }
6119                 }
6120                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
6121                 cnt = MLX5_POOL_GET_CNT(pool, i);
6122                 cnt->pool = pool;
6123                 cnt->dcs_when_free = dcs;
6124                 *cnt_free = cnt;
6125                 return pool;
6126         }
6127         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
6128         if (!dcs) {
6129                 rte_errno = ENODATA;
6130                 return NULL;
6131         }
6132         pool = flow_dv_pool_create(dev, dcs, age);
6133         if (!pool) {
6134                 mlx5_devx_cmd_destroy(dcs);
6135                 return NULL;
6136         }
6137         TAILQ_INIT(&tmp_tq);
6138         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
6139                 cnt = MLX5_POOL_GET_CNT(pool, i);
6140                 cnt->pool = pool;
6141                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
6142         }
6143         rte_spinlock_lock(&cmng->csl[cnt_type]);
6144         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
6145         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6146         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
6147         (*cnt_free)->pool = pool;
6148         return pool;
6149 }
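
/*
 * Illustrative sketch: in fallback mode each counter arrives with a raw
 * devx object ID; the owning pool base and the in-pool slot are derived
 * from it as in the function above (hypothetical helper name):
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
static inline void
sketch_split_dcs_id(uint32_t dcs_id, uint32_t *base, uint32_t *slot)
{
	*base = RTE_ALIGN_FLOOR(dcs_id, MLX5_COUNTERS_PER_POOL);
	*slot = dcs_id % MLX5_COUNTERS_PER_POOL;
}
#endif /* MLX5_FLOW_DV_DOC_SKETCH */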
6150
6151 /**
6152  * Allocate a flow counter.
6153  *
6154  * @param[in] dev
6155  *   Pointer to the Ethernet device structure.
6156  * @param[in] age
6157  *   Whether the counter was allocated for aging.
6158  *
6159  * @return
6160  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6161  */
6162 static uint32_t
6163 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
6164 {
6165         struct mlx5_priv *priv = dev->data->dev_private;
6166         struct mlx5_flow_counter_pool *pool = NULL;
6167         struct mlx5_flow_counter *cnt_free = NULL;
6168         bool fallback = priv->sh->cmng.counter_fallback;
6169         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6170         enum mlx5_counter_type cnt_type =
6171                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6172         uint32_t cnt_idx;
6173
6174         if (!priv->sh->devx) {
6175                 rte_errno = ENOTSUP;
6176                 return 0;
6177         }
6178         /* Get free counters from container. */
6179         rte_spinlock_lock(&cmng->csl[cnt_type]);
6180         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
6181         if (cnt_free)
6182                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
6183         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6184         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
6185                 goto err;
6186         pool = cnt_free->pool;
6187         if (fallback)
6188                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
6189         /* Create a DV counter action only on first-time usage. */
6190         if (!cnt_free->action) {
6191                 uint16_t offset;
6192                 struct mlx5_devx_obj *dcs;
6193                 int ret;
6194
6195                 if (!fallback) {
6196                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
6197                         dcs = pool->min_dcs;
6198                 } else {
6199                         offset = 0;
6200                         dcs = cnt_free->dcs_when_free;
6201                 }
6202                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
6203                                                             &cnt_free->action);
6204                 if (ret) {
6205                         rte_errno = errno;
6206                         goto err;
6207                 }
6208         }
6209         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
6210                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
6211         /* Update the counter reset values. */
6212         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
6213                                  &cnt_free->bytes))
6214                 goto err;
6215         if (!fallback && !priv->sh->cmng.query_thread_on)
6216                 /* Start the asynchronous batch query by the host thread. */
6217                 mlx5_set_query_alarm(priv->sh);
6218         /*
6219          * When the count action isn't shared (by ID), the shared_info field
6220          * holds the indirect action API's refcnt.
6221          * When the counter action is shared neither by ID nor by the
6222          * indirect action API, the refcnt must be 1.
6223          */
6224         cnt_free->shared_info.refcnt = 1;
6225         return cnt_idx;
6226 err:
6227         if (cnt_free) {
6228                 cnt_free->pool = pool;
6229                 if (fallback)
6230                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
6231                 rte_spinlock_lock(&cmng->csl[cnt_type]);
6232                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
6233                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6234         }
6235         return 0;
6236 }
6237
6238 /**
6239  * Get age param from counter index.
6240  *
6241  * @param[in] dev
6242  *   Pointer to the Ethernet device structure.
6243  * @param[in] counter
6244  *   Index to the counter handler.
6245  *
6246  * @return
6247  *   The aging parameter specified for the counter index.
6248  */
6249 static struct mlx5_age_param*
6250 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6251                                 uint32_t counter)
6252 {
6253         struct mlx5_flow_counter *cnt;
6254         struct mlx5_flow_counter_pool *pool = NULL;
6255
6256         flow_dv_counter_get_by_idx(dev, counter, &pool);
6257         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6258         cnt = MLX5_POOL_GET_CNT(pool, counter);
6259         return MLX5_CNT_TO_AGE(cnt);
6260 }
6261
6262 /**
6263  * Remove a flow counter from aged counter list.
6264  *
6265  * @param[in] dev
6266  *   Pointer to the Ethernet device structure.
6267  * @param[in] counter
6268  *   Index to the counter handler.
6269  * @param[in] cnt
6270  *   Pointer to the counter handler.
6271  */
6272 static void
6273 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6274                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6275 {
6276         struct mlx5_age_info *age_info;
6277         struct mlx5_age_param *age_param;
6278         struct mlx5_priv *priv = dev->data->dev_private;
6279         uint16_t expected = AGE_CANDIDATE;
6280
6281         age_info = GET_PORT_AGE_INFO(priv);
6282         age_param = flow_dv_counter_idx_get_age(dev, counter);
6283         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6284                                          AGE_FREE, false, __ATOMIC_RELAXED,
6285                                          __ATOMIC_RELAXED)) {
6286                 /*
6287                  * The lock is needed even on age timeout, since the
6288                  * counter may still be in process.
6289                  */
6290                 rte_spinlock_lock(&age_info->aged_sl);
6291                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6292                 rte_spinlock_unlock(&age_info->aged_sl);
6293                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6294         }
6295 }
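
/*
 * Illustrative sketch: the age-state transition above is a single
 * compare-and-swap gate. The same pattern in isolation (hypothetical
 * state values mirroring AGE_CANDIDATE/AGE_FREE):
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
static inline bool
sketch_try_retire(uint16_t *state, uint16_t candidate, uint16_t freed)
{
	uint16_t expected = candidate;

	/* Fails if another thread already moved the state on. */
	return __atomic_compare_exchange_n(state, &expected, freed, false,
					   __ATOMIC_RELAXED,
					   __ATOMIC_RELAXED);
}
#endif /* MLX5_FLOW_DV_DOC_SKETCH */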
6296
6297 /**
6298  * Release a flow counter.
6299  *
6300  * @param[in] dev
6301  *   Pointer to the Ethernet device structure.
6302  * @param[in] counter
6303  *   Index to the counter handler.
6304  */
6305 static void
6306 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6307 {
6308         struct mlx5_priv *priv = dev->data->dev_private;
6309         struct mlx5_flow_counter_pool *pool = NULL;
6310         struct mlx5_flow_counter *cnt;
6311         enum mlx5_counter_type cnt_type;
6312
6313         if (!counter)
6314                 return;
6315         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6316         MLX5_ASSERT(pool);
6317         if (pool->is_aged) {
6318                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6319         } else {
6320                 /*
6321                  * If the counter action is shared by the indirect action API,
6322                  * the atomic operation decrements its reference counter.
6323                  * If the action is still referenced after the decrement, the
6324                  * function returns here and does not release it.
6325                  * When the counter action is not shared by the indirect action
6326                  * API, the refcnt is 1 before the decrement, so the condition
6327                  * fails and the function does not return here.
6328                  */
6329                 if (__atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6330                                        __ATOMIC_RELAXED))
6331                         return;
6332         }
6333         cnt->pool = pool;
6334         /*
6335          * Put the counter back to the list to be updated in non-fallback
6336          * mode. Two lists are used alternately: while one is being queried,
6337          * freed counters are added to the other, selected by the pool's
6338          * query_gen value. After the query finishes, that list is appended
6339          * to the global container counter list. The lists swap when a query
6340          * starts, so no lock is needed: the query callback and the release
6341          * function always operate on different lists.
6342          */
6343         if (!priv->sh->cmng.counter_fallback) {
6344                 rte_spinlock_lock(&pool->csl);
6345                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6346                 rte_spinlock_unlock(&pool->csl);
6347         } else {
6348                 cnt->dcs_when_free = cnt->dcs_when_active;
6349                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6350                                            MLX5_COUNTER_TYPE_ORIGIN;
6351                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6352                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6353                                   cnt, next);
6354                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6355         }
6356 }
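
/*
 * Illustrative sketch: a counter's lifecycle through the helpers above,
 * as a hypothetical caller would drive it (flow attachment elided):
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
static int
sketch_counter_roundtrip(struct rte_eth_dev *dev)
{
	uint64_t pkts, bytes;
	uint32_t idx = flow_dv_counter_alloc(dev, 0 /* no aging */);

	if (!idx)
		return -rte_errno;
	/* ... attach the counter action to a flow, run traffic ... */
	if (_flow_dv_query_count(dev, idx, &pkts, &bytes)) {
		flow_dv_counter_free(dev, idx);
		return -rte_errno;
	}
	flow_dv_counter_free(dev, idx);
	return 0;
}
#endif /* MLX5_FLOW_DV_DOC_SKETCH */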
6357
6358 /**
6359  * Resize a meter ID container.
6360  *
6361  * @param[in] dev
6362  *   Pointer to the Ethernet device structure.
6363  *
6364  * @return
6365  *   0 on success, otherwise negative errno value and rte_errno is set.
6366  */
6367 static int
6368 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6369 {
6370         struct mlx5_priv *priv = dev->data->dev_private;
6371         struct mlx5_aso_mtr_pools_mng *pools_mng =
6372                                 &priv->sh->mtrmng->pools_mng;
6373         void *old_pools = pools_mng->pools;
6374         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6375         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6376         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6377
6378         if (!pools) {
6379                 rte_errno = ENOMEM;
6380                 return -ENOMEM;
6381         }
6382         if (!pools_mng->n)
6383                 if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6384                         mlx5_free(pools);
6385                         return -ENOMEM;
6386                 }
6387         if (old_pools)
6388                 memcpy(pools, old_pools, pools_mng->n *
6389                                        sizeof(struct mlx5_aso_mtr_pool *));
6390         pools_mng->n = resize;
6391         pools_mng->pools = pools;
6392         if (old_pools)
6393                 mlx5_free(old_pools);
6394         return 0;
6395 }
6396
6397 /**
6398  * Prepare a new meter and/or a new meter pool.
6399  *
6400  * @param[in] dev
6401  *   Pointer to the Ethernet device structure.
6402  * @param[out] mtr_free
6403  *   Where to put the pointer of a new meter.
6404  *
6405  * @return
6406  *   The meter pool pointer and @p mtr_free is set on success,
6407  *   NULL otherwise and rte_errno is set.
6408  */
6409 static struct mlx5_aso_mtr_pool *
6410 flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
6411 {
6412         struct mlx5_priv *priv = dev->data->dev_private;
6413         struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
6414         struct mlx5_aso_mtr_pool *pool = NULL;
6415         struct mlx5_devx_obj *dcs = NULL;
6416         uint32_t i;
6417         uint32_t log_obj_size;
6418
6419         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6420         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
6421                                                       priv->sh->cdev->pdn,
6422                                                       log_obj_size);
6423         if (!dcs) {
6424                 rte_errno = ENODATA;
6425                 return NULL;
6426         }
6427         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6428         if (!pool) {
6429                 rte_errno = ENOMEM;
6430                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6431                 return NULL;
6432         }
6433         pool->devx_obj = dcs;
6434         rte_rwlock_write_lock(&pools_mng->resize_mtrwl);
6435         pool->index = pools_mng->n_valid;
6436         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6437                 mlx5_free(pool);
6438                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6439                 rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6440                 return NULL;
6441         }
6442         pools_mng->pools[pool->index] = pool;
6443         pools_mng->n_valid++;
6444         rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6445         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6446                 pool->mtrs[i].offset = i;
6447                 LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
6448         }
6449         pool->mtrs[0].offset = 0;
6450         *mtr_free = &pool->mtrs[0];
6451         return pool;
6452 }
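
/*
 * Illustrative sketch: pool creation above seeds the free list with
 * every element except the first, which is handed straight back to the
 * caller. The generic pattern (hypothetical types, guarded out):
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
struct sketch_elt {
	LIST_ENTRY(sketch_elt) next;
	uint32_t offset;
};
LIST_HEAD(sketch_free_list, sketch_elt);

static struct sketch_elt *
sketch_seed_pool(struct sketch_free_list *fl, struct sketch_elt *elts,
		 uint32_t n)
{
	uint32_t i;

	for (i = 1; i < n; ++i) {
		elts[i].offset = i;
		LIST_INSERT_HEAD(fl, &elts[i], next);
	}
	elts[0].offset = 0;
	return &elts[0]; /* the first element bypasses the free list */
}
#endif /* MLX5_FLOW_DV_DOC_SKETCH */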
6453
6454 /**
6455  * Release a flow meter into the pool.
6456  *
6457  * @param[in] dev
6458  *   Pointer to the Ethernet device structure.
6459  * @param[in] mtr_idx
6460  *   Index to the ASO flow meter.
6461  */
6462 static void
6463 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6464 {
6465         struct mlx5_priv *priv = dev->data->dev_private;
6466         struct mlx5_aso_mtr_pools_mng *pools_mng =
6467                                 &priv->sh->mtrmng->pools_mng;
6468         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6469
6470         MLX5_ASSERT(aso_mtr);
6471         rte_spinlock_lock(&pools_mng->mtrsl);
6472         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6473         aso_mtr->state = ASO_METER_FREE;
6474         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6475         rte_spinlock_unlock(&pools_mng->mtrsl);
6476 }
6477
6478 /**
6479  * Allocate an ASO flow meter.
6480  *
6481  * @param[in] dev
6482  *   Pointer to the Ethernet device structure.
6483  *
6484  * @return
6485  *   Index to the ASO flow meter on success, 0 otherwise and rte_errno is set.
6486  */
6487 static uint32_t
6488 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6489 {
6490         struct mlx5_priv *priv = dev->data->dev_private;
6491         struct mlx5_aso_mtr *mtr_free = NULL;
6492         struct mlx5_aso_mtr_pools_mng *pools_mng =
6493                                 &priv->sh->mtrmng->pools_mng;
6494         struct mlx5_aso_mtr_pool *pool;
6495         uint32_t mtr_idx = 0;
6496
6497         if (!priv->sh->devx) {
6498                 rte_errno = ENOTSUP;
6499                 return 0;
6500         }
6501         /* Allocate the flow meter memory. */
6502         /* Get a free meter from the management free list. */
6503         rte_spinlock_lock(&pools_mng->mtrsl);
6504         mtr_free = LIST_FIRST(&pools_mng->meters);
6505         if (mtr_free)
6506                 LIST_REMOVE(mtr_free, next);
6507         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6508                 rte_spinlock_unlock(&pools_mng->mtrsl);
6509                 return 0;
6510         }
6511         mtr_free->state = ASO_METER_WAIT;
6512         rte_spinlock_unlock(&pools_mng->mtrsl);
6513         pool = container_of(mtr_free,
6514                         struct mlx5_aso_mtr_pool,
6515                         mtrs[mtr_free->offset]);
6516         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6517         if (!mtr_free->fm.meter_action) {
6518 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6519                 struct rte_flow_error error;
6520                 uint8_t reg_id;
6521
6522                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6523                 mtr_free->fm.meter_action =
6524                         mlx5_glue->dv_create_flow_action_aso
6525                                                 (priv->sh->rx_domain,
6526                                                  pool->devx_obj->obj,
6527                                                  mtr_free->offset,
6528                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6529                                                  reg_id - REG_C_0);
6530 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6531                 if (!mtr_free->fm.meter_action) {
6532                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6533                         return 0;
6534                 }
6535         }
6536         return mtr_idx;
6537 }
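
/*
 * Illustrative sketch: an ASO meter round trip through the helpers
 * above, as a hypothetical caller would see it:
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
static int
sketch_mtr_roundtrip(struct rte_eth_dev *dev)
{
	uint32_t mtr_idx = flow_dv_mtr_alloc(dev);

	if (!mtr_idx)
		return -rte_errno;
	/* ... configure the meter and bind it to a flow ... */
	flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
	return 0;
}
#endif /* MLX5_FLOW_DV_DOC_SKETCH */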
6538
6539 /**
6540  * Verify the @p attributes will be correctly understood by the NIC and store
6541  * them in the @p flow if everything is correct.
6542  *
6543  * @param[in] dev
6544  *   Pointer to dev struct.
6545  * @param[in] tunnel
6546  *   Pointer to the tunnel offload context, or NULL.
6547  * @param[in] attributes
6548  *   Pointer to flow attributes.
6549  * @param[in] grp_info
6550  *   Pointer to the flow group translation attributes.
6549  * @param[out] error
6550  *   Pointer to error structure.
6551  *
6552  * @return
6553  *   - 0 on success and non-root table.
6554  *   - 1 on success and root table.
6555  *   - a negative errno value otherwise and rte_errno is set.
6556  */
6557 static int
6558 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6559                             const struct mlx5_flow_tunnel *tunnel,
6560                             const struct rte_flow_attr *attributes,
6561                             const struct flow_grp_info *grp_info,
6562                             struct rte_flow_error *error)
6563 {
6564         struct mlx5_priv *priv = dev->data->dev_private;
6565         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6566         int ret = 0;
6567
6568 #ifndef HAVE_MLX5DV_DR
6569         RTE_SET_USED(tunnel);
6570         RTE_SET_USED(grp_info);
6571         if (attributes->group)
6572                 return rte_flow_error_set(error, ENOTSUP,
6573                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6574                                           NULL,
6575                                           "groups are not supported");
6576 #else
6577         uint32_t table = 0;
6578
6579         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6580                                        grp_info, error);
6581         if (ret)
6582                 return ret;
6583         if (!table)
6584                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6585 #endif
6586         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6587             attributes->priority > lowest_priority)
6588                 return rte_flow_error_set(error, ENOTSUP,
6589                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6590                                           NULL,
6591                                           "priority out of range");
6592         if (attributes->transfer) {
6593                 if (!priv->config.dv_esw_en)
6594                         return rte_flow_error_set
6595                                 (error, ENOTSUP,
6596                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6597                                  "E-Switch dr is not supported");
6598                 if (!(priv->representor || priv->master))
6599                         return rte_flow_error_set
6600                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6601                                  NULL, "E-Switch configuration can only be"
6602                                  " done by a master or a representor device");
6603                 if (attributes->egress)
6604                         return rte_flow_error_set
6605                                 (error, ENOTSUP,
6606                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6607                                  "egress is not supported");
6608         }
6609         if (!(attributes->egress ^ attributes->ingress))
6610                 return rte_flow_error_set(error, ENOTSUP,
6611                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6612                                           "must specify exactly one of "
6613                                           "ingress or egress");
6614         return ret;
6615 }
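
/*
 * Illustrative sketch: the "exactly one direction" rule checked above
 * is an XOR over the two attribute bits:
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
static inline bool
sketch_one_direction(uint32_t ingress, uint32_t egress)
{
	/* True only when exactly one of the two flags is set. */
	return !!ingress ^ !!egress;
}
#endif /* MLX5_FLOW_DV_DOC_SKETCH */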
6616
6617 static int
6618 validate_integrity_bits(const struct rte_flow_item_integrity *mask,
6619                         int64_t pattern_flags, uint64_t l3_flags,
6620                         uint64_t l4_flags, uint64_t ip4_flag,
6621                         struct rte_flow_error *error)
6622 {
6623         if (mask->l3_ok && !(pattern_flags & l3_flags))
6624                 return rte_flow_error_set(error, EINVAL,
6625                                           RTE_FLOW_ERROR_TYPE_ITEM,
6626                                           NULL, "missing L3 protocol");
6627
6628         if (mask->ipv4_csum_ok && !(pattern_flags & ip4_flag))
6629                 return rte_flow_error_set(error, EINVAL,
6630                                           RTE_FLOW_ERROR_TYPE_ITEM,
6631                                           NULL, "missing IPv4 protocol");
6632
6633         if ((mask->l4_ok || mask->l4_csum_ok) && !(pattern_flags & l4_flags))
6634                 return rte_flow_error_set(error, EINVAL,
6635                                           RTE_FLOW_ERROR_TYPE_ITEM,
6636                                           NULL, "missing L4 protocol");
6637
6638         return 0;
6639 }
6640
6641 static int
6642 flow_dv_validate_item_integrity_post(const struct
6643                                      rte_flow_item *integrity_items[2],
6644                                      int64_t pattern_flags,
6645                                      struct rte_flow_error *error)
6646 {
6647         const struct rte_flow_item_integrity *mask;
6648         int ret;
6649
6650         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
6651                 mask = (typeof(mask))integrity_items[0]->mask;
6652                 ret = validate_integrity_bits(mask, pattern_flags,
6653                                               MLX5_FLOW_LAYER_OUTER_L3,
6654                                               MLX5_FLOW_LAYER_OUTER_L4,
6655                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4,
6656                                               error);
6657                 if (ret)
6658                         return ret;
6659         }
6660         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
6661                 mask = (typeof(mask))integrity_items[1]->mask;
6662                 ret = validate_integrity_bits(mask, pattern_flags,
6663                                               MLX5_FLOW_LAYER_INNER_L3,
6664                                               MLX5_FLOW_LAYER_INNER_L4,
6665                                               MLX5_FLOW_LAYER_INNER_L3_IPV4,
6666                                               error);
6667                 if (ret)
6668                         return ret;
6669         }
6670         return 0;
6671 }
6672
6673 static int
6674 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6675                                 const struct rte_flow_item *integrity_item,
6676                                 uint64_t pattern_flags, uint64_t *last_item,
6677                                 const struct rte_flow_item *integrity_items[2],
6678                                 struct rte_flow_error *error)
6679 {
6680         struct mlx5_priv *priv = dev->data->dev_private;
6681         const struct rte_flow_item_integrity *mask = (typeof(mask))
6682                                                      integrity_item->mask;
6683         const struct rte_flow_item_integrity *spec = (typeof(spec))
6684                                                      integrity_item->spec;
6685
6686         if (!priv->config.hca_attr.pkt_integrity_match)
6687                 return rte_flow_error_set(error, ENOTSUP,
6688                                           RTE_FLOW_ERROR_TYPE_ITEM,
6689                                           integrity_item,
6690                                           "packet integrity item not supported");
6691         if (!spec)
6692                 return rte_flow_error_set(error, ENOTSUP,
6693                                           RTE_FLOW_ERROR_TYPE_ITEM,
6694                                           integrity_item,
6695                                           "no spec for integrity item");
6696         if (!mask)
6697                 mask = &rte_flow_item_integrity_mask;
6698         if (!mlx5_validate_integrity_item(mask))
6699                 return rte_flow_error_set(error, ENOTSUP,
6700                                           RTE_FLOW_ERROR_TYPE_ITEM,
6701                                           integrity_item,
6702                                           "unsupported integrity filter");
6703         if (spec->level > 1) {
6704                 if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY)
6705                         return rte_flow_error_set
6706                                 (error, ENOTSUP,
6707                                  RTE_FLOW_ERROR_TYPE_ITEM,
6708                                  NULL, "multiple inner integrity items not supported");
6709                 integrity_items[1] = integrity_item;
6710                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
6711         } else {
6712                 if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY)
6713                         return rte_flow_error_set
6714                                 (error, ENOTSUP,
6715                                  RTE_FLOW_ERROR_TYPE_ITEM,
6716                                  NULL, "multiple outer integrity items not supported");
6717                 integrity_items[0] = integrity_item;
6718                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
6719         }
6720         return 0;
6721 }
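
/*
 * Illustrative sketch: an integrity item an application might pass to
 * the validation above; level 0 or 1 selects the outer headers, a level
 * greater than 1 the inner ones (hypothetical spec, outer case):
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
static const struct rte_flow_item_integrity sketch_outer_integrity = {
	.level = 0,		/* outer headers */
	.l3_ok = 1,		/* requires an L3 item in the same pattern */
	.ipv4_csum_ok = 1,	/* requires an IPv4 item in the same pattern */
};
#endif /* MLX5_FLOW_DV_DOC_SKETCH */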
6722
6723 static int
6724 flow_dv_validate_item_flex(struct rte_eth_dev *dev,
6725                            const struct rte_flow_item *item,
6726                            uint64_t item_flags,
6727                            uint64_t *last_item,
6728                            bool is_inner,
6729                            struct rte_flow_error *error)
6730 {
6731         const struct rte_flow_item_flex *flow_spec = item->spec;
6732         const struct rte_flow_item_flex *flow_mask = item->mask;
6733         struct mlx5_flex_item *flex;
6734
6735         if (!flow_spec)
6736                 return rte_flow_error_set(error, EINVAL,
6737                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6738                                           "flex flow item spec cannot be NULL");
6739         if (!flow_mask)
6740                 return rte_flow_error_set(error, EINVAL,
6741                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6742                                           "flex flow item mask cannot be NULL");
6743         if (item->last)
6744                 return rte_flow_error_set(error, ENOTSUP,
6745                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6746                                           "flex flow item last not supported");
6747         if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
6748                 return rte_flow_error_set(error, EINVAL,
6749                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6750                                           "invalid flex flow item handle");
6751         flex = (struct mlx5_flex_item *)flow_spec->handle;
6752         switch (flex->tunnel_mode) {
6753         case FLEX_TUNNEL_MODE_SINGLE:
6754                 if (item_flags &
6755                     (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
6756                         return rte_flow_error_set
6757                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
6758                                  NULL, "multiple flex items not supported");
6759                 break;
6760         case FLEX_TUNNEL_MODE_OUTER:
6761                 if (is_inner)
6762                         return rte_flow_error_set
6763                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
6764                                  NULL, "inner flex item was not configured");
6765                 if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
6766                         return rte_flow_error_set
6767                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
6768                                  NULL, "multiple flex items not supported");
6769                 break;
6770         case FLEX_TUNNEL_MODE_INNER:
6771                 if (!is_inner)
6772                         return rte_flow_error_set
6773                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
6774                                  NULL, "outer flex item was not configured");
6775                 if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
6776                         return rte_flow_error_set
6777                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
6778                                  NULL, "multiple flex items not supported");
6779                 break;
6780         case FLEX_TUNNEL_MODE_MULTI:
6781                 if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
6782                     (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX))) {
6783                         return rte_flow_error_set
6784                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
6785                                  NULL, "multiple flex items not supported");
6786                 }
6787                 break;
6788         case FLEX_TUNNEL_MODE_TUNNEL:
6789                 if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
6790                         return rte_flow_error_set
6791                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
6792                                  NULL, "multiple flex tunnel items not supported");
6793                 break;
6794         default:
6795                 return rte_flow_error_set
6796                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
6797                          NULL, "invalid flex item configuration");
6798         }
6799         *last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
6800                      MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
6801                      MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
6802         return 0;
6803 }
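
/*
 * Illustrative sketch: how a flex item's tunnel mode maps to the
 * pattern-flag bit recorded above (mirrors the ternary at the end of
 * the function; hypothetical helper name):
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
static inline uint64_t
sketch_flex_last_item(int tunnel_mode, bool is_inner)
{
	if (tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL)
		return MLX5_FLOW_ITEM_FLEX_TUNNEL;
	return is_inner ? MLX5_FLOW_ITEM_INNER_FLEX :
			  MLX5_FLOW_ITEM_OUTER_FLEX;
}
#endif /* MLX5_FLOW_DV_DOC_SKETCH */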
6804
6805 /**
6806  * Internal validation function for both actions and items.
6807  *
6808  * @param[in] dev
6809  *   Pointer to the rte_eth_dev structure.
6810  * @param[in] attr
6811  *   Pointer to the flow attributes.
6812  * @param[in] items
6813  *   Pointer to the list of items.
6814  * @param[in] actions
6815  *   Pointer to the list of actions.
6816  * @param[in] external
6817  *   This flow rule is created by a request external to the PMD.
6818  * @param[in] hairpin
6819  *   Number of hairpin TX actions, 0 means classic flow.
6820  * @param[out] error
6821  *   Pointer to the error structure.
6822  *
6823  * @return
6824  *   0 on success, a negative errno value otherwise and rte_errno is set.
6825  */
6826 static int
6827 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6828                  const struct rte_flow_item items[],
6829                  const struct rte_flow_action actions[],
6830                  bool external, int hairpin, struct rte_flow_error *error)
6831 {
6832         int ret;
6833         uint64_t action_flags = 0;
6834         uint64_t item_flags = 0;
6835         uint64_t last_item = 0;
6836         uint8_t next_protocol = 0xff;
6837         uint16_t ether_type = 0;
6838         int actions_n = 0;
6839         uint8_t item_ipv6_proto = 0;
6840         int fdb_mirror_limit = 0;
6841         int modify_after_mirror = 0;
6842         const struct rte_flow_item *geneve_item = NULL;
6843         const struct rte_flow_item *gre_item = NULL;
6844         const struct rte_flow_item *gtp_item = NULL;
6845         const struct rte_flow_action_raw_decap *decap;
6846         const struct rte_flow_action_raw_encap *encap;
6847         const struct rte_flow_action_rss *rss = NULL;
6848         const struct rte_flow_action_rss *sample_rss = NULL;
6849         const struct rte_flow_action_count *sample_count = NULL;
6850         const struct rte_flow_item_tcp nic_tcp_mask = {
6851                 .hdr = {
6852                         .tcp_flags = 0xFF,
6853                         .src_port = RTE_BE16(UINT16_MAX),
6854                         .dst_port = RTE_BE16(UINT16_MAX),
6855                 }
6856         };
6857         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6858                 .hdr = {
6859                         .src_addr =
6860                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6861                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6862                         .dst_addr =
6863                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6864                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6865                         .vtc_flow = RTE_BE32(0xffffffff),
6866                         .proto = 0xff,
6867                         .hop_limits = 0xff,
6868                 },
6869                 .has_frag_ext = 1,
6870         };
6871         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6872                 .hdr = {
6873                         .common = {
6874                                 .u32 =
6875                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6876                                         .type = 0xFF,
6877                                         }).u32),
6878                         },
6879                         .dummy[0] = 0xffffffff,
6880                 },
6881         };
6882         struct mlx5_priv *priv = dev->data->dev_private;
6883         struct mlx5_dev_config *dev_conf = &priv->config;
6884         uint16_t queue_index = 0xFFFF;
6885         const struct rte_flow_item_vlan *vlan_m = NULL;
6886         uint32_t rw_act_num = 0;
6887         uint64_t is_root;
6888         const struct mlx5_flow_tunnel *tunnel;
6889         enum mlx5_tof_rule_type tof_rule_type;
6890         struct flow_grp_info grp_info = {
6891                 .external = !!external,
6892                 .transfer = !!attr->transfer,
6893                 .fdb_def_rule = !!priv->fdb_def_rule,
6894                 .std_tbl_fix = true,
6895         };
6896         const struct rte_eth_hairpin_conf *conf;
6897         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
6898         const struct rte_flow_item *port_id_item = NULL;
6899         bool def_policy = false;
6900         uint16_t udp_dport = 0;
6901
6902         if (items == NULL)
6903                 return -1;
6904         tunnel = is_tunnel_offload_active(dev) ?
6905                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6906         if (tunnel) {
6907                 if (!priv->config.dv_flow_en)
6908                         return rte_flow_error_set
6909                                 (error, ENOTSUP,
6910                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6911                                  NULL, "tunnel offload requires DV flow interface");
6912                 if (priv->representor)
6913                         return rte_flow_error_set
6914                                 (error, ENOTSUP,
6915                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6916                                  NULL, "decap not supported for VF representor");
6917                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6918                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6919                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6920                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6921                                         MLX5_FLOW_ACTION_DECAP;
6922                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6923                                         (dev, attr, tunnel, tof_rule_type);
6924         }
6925         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6926         if (ret < 0)
6927                 return ret;
6928         is_root = (uint64_t)ret;
6929         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6930                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6931                 int type = items->type;
6932
6933                 if (!mlx5_flow_os_item_supported(type))
6934                         return rte_flow_error_set(error, ENOTSUP,
6935                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6936                                                   NULL, "item not supported");
6937                 switch (type) {
6938                 case RTE_FLOW_ITEM_TYPE_VOID:
6939                         break;
6940                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6941                         ret = flow_dv_validate_item_port_id
6942                                         (dev, items, attr, item_flags, error);
6943                         if (ret < 0)
6944                                 return ret;
6945                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6946                         port_id_item = items;
6947                         break;
6948                 case RTE_FLOW_ITEM_TYPE_ETH:
6949                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6950                                                           true, error);
6951                         if (ret < 0)
6952                                 return ret;
6953                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6954                                              MLX5_FLOW_LAYER_OUTER_L2;
6955                         if (items->mask != NULL && items->spec != NULL) {
6956                                 ether_type =
6957                                         ((const struct rte_flow_item_eth *)
6958                                          items->spec)->type;
6959                                 ether_type &=
6960                                         ((const struct rte_flow_item_eth *)
6961                                          items->mask)->type;
6962                                 ether_type = rte_be_to_cpu_16(ether_type);
6963                         } else {
6964                                 ether_type = 0;
6965                         }
6966                         break;
6967                 case RTE_FLOW_ITEM_TYPE_VLAN:
6968                         ret = flow_dv_validate_item_vlan(items, item_flags,
6969                                                          dev, error);
6970                         if (ret < 0)
6971                                 return ret;
6972                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6973                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6974                         if (items->mask != NULL && items->spec != NULL) {
6975                                 ether_type =
6976                                         ((const struct rte_flow_item_vlan *)
6977                                          items->spec)->inner_type;
6978                                 ether_type &=
6979                                         ((const struct rte_flow_item_vlan *)
6980                                          items->mask)->inner_type;
6981                                 ether_type = rte_be_to_cpu_16(ether_type);
6982                         } else {
6983                                 ether_type = 0;
6984                         }
6985                         /* Store outer VLAN mask for of_push_vlan action. */
6986                         if (!tunnel)
6987                                 vlan_m = items->mask;
6988                         break;
6989                 case RTE_FLOW_ITEM_TYPE_IPV4:
6990                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6991                                                   &item_flags, &tunnel);
6992                         ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
6993                                                          last_item, ether_type,
6994                                                          error);
6995                         if (ret < 0)
6996                                 return ret;
6997                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6998                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6999                         if (items->mask != NULL &&
7000                             ((const struct rte_flow_item_ipv4 *)
7001                              items->mask)->hdr.next_proto_id) {
7002                                 next_protocol =
7003                                         ((const struct rte_flow_item_ipv4 *)
7004                                          (items->spec))->hdr.next_proto_id;
7005                                 next_protocol &=
7006                                         ((const struct rte_flow_item_ipv4 *)
7007                                          (items->mask))->hdr.next_proto_id;
7008                         } else {
7009                                 /* Reset for inner layer. */
7010                                 next_protocol = 0xff;
7011                         }
7012                         break;
7013                 case RTE_FLOW_ITEM_TYPE_IPV6:
7014                         mlx5_flow_tunnel_ip_check(items, next_protocol,
7015                                                   &item_flags, &tunnel);
7016                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
7017                                                            last_item,
7018                                                            ether_type,
7019                                                            &nic_ipv6_mask,
7020                                                            error);
7021                         if (ret < 0)
7022                                 return ret;
7023                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
7024                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
7025                         if (items->mask != NULL &&
7026                             ((const struct rte_flow_item_ipv6 *)
7027                              items->mask)->hdr.proto) {
7028                                 item_ipv6_proto =
7029                                         ((const struct rte_flow_item_ipv6 *)
7030                                          items->spec)->hdr.proto;
7031                                 next_protocol =
7032                                         ((const struct rte_flow_item_ipv6 *)
7033                                          items->spec)->hdr.proto;
7034                                 next_protocol &=
7035                                         ((const struct rte_flow_item_ipv6 *)
7036                                          items->mask)->hdr.proto;
7037                         } else {
7038                                 /* Reset for inner layer. */
7039                                 next_protocol = 0xff;
7040                         }
7041                         break;
7042                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
7043                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
7044                                                                   item_flags,
7045                                                                   error);
7046                         if (ret < 0)
7047                                 return ret;
7048                         last_item = tunnel ?
7049                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
7050                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
7051                         if (items->mask != NULL &&
7052                             ((const struct rte_flow_item_ipv6_frag_ext *)
7053                              items->mask)->hdr.next_header) {
7054                                 next_protocol =
7055                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7056                                  items->spec)->hdr.next_header;
7057                                 next_protocol &=
7058                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7059                                  items->mask)->hdr.next_header;
7060                         } else {
7061                                 /* Reset for inner layer. */
7062                                 next_protocol = 0xff;
7063                         }
7064                         break;
7065                 case RTE_FLOW_ITEM_TYPE_TCP:
7066                         ret = mlx5_flow_validate_item_tcp
7067                                                 (items, item_flags,
7068                                                  next_protocol,
7069                                                  &nic_tcp_mask,
7070                                                  error);
7071                         if (ret < 0)
7072                                 return ret;
7073                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7074                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
7075                         break;
7076                 case RTE_FLOW_ITEM_TYPE_UDP:
7077                         ret = mlx5_flow_validate_item_udp(items, item_flags,
7078                                                           next_protocol,
7079                                                           error);
7080                         const struct rte_flow_item_udp *spec = items->spec;
7081                         const struct rte_flow_item_udp *mask = items->mask;
7082                         if (!mask)
7083                                 mask = &rte_flow_item_udp_mask;
7084                         if (spec != NULL)
7085                                 udp_dport = rte_be_to_cpu_16
7086                                                 (spec->hdr.dst_port &
7087                                                  mask->hdr.dst_port);
7088                         if (ret < 0)
7089                                 return ret;
7090                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7091                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
7092                         break;
7093                 case RTE_FLOW_ITEM_TYPE_GRE:
7094                         ret = mlx5_flow_validate_item_gre(items, item_flags,
7095                                                           next_protocol, error);
7096                         if (ret < 0)
7097                                 return ret;
7098                         gre_item = items;
7099                         last_item = MLX5_FLOW_LAYER_GRE;
7100                         break;
7101                 case RTE_FLOW_ITEM_TYPE_NVGRE:
7102                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
7103                                                             next_protocol,
7104                                                             error);
7105                         if (ret < 0)
7106                                 return ret;
7107                         last_item = MLX5_FLOW_LAYER_NVGRE;
7108                         break;
7109                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7110                         ret = mlx5_flow_validate_item_gre_key
7111                                 (items, item_flags, gre_item, error);
7112                         if (ret < 0)
7113                                 return ret;
7114                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
7115                         break;
7116                 case RTE_FLOW_ITEM_TYPE_VXLAN:
7117                         ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
7118                                                             items, item_flags,
7119                                                             attr, error);
7120                         if (ret < 0)
7121                                 return ret;
7122                         last_item = MLX5_FLOW_LAYER_VXLAN;
7123                         break;
7124                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7125                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
7126                                                                 item_flags, dev,
7127                                                                 error);
7128                         if (ret < 0)
7129                                 return ret;
7130                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7131                         break;
7132                 case RTE_FLOW_ITEM_TYPE_GENEVE:
7133                         ret = mlx5_flow_validate_item_geneve(items,
7134                                                              item_flags, dev,
7135                                                              error);
7136                         if (ret < 0)
7137                                 return ret;
7138                         geneve_item = items;
7139                         last_item = MLX5_FLOW_LAYER_GENEVE;
7140                         break;
7141                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
7142                         ret = mlx5_flow_validate_item_geneve_opt(items,
7143                                                                  last_item,
7144                                                                  geneve_item,
7145                                                                  dev,
7146                                                                  error);
7147                         if (ret < 0)
7148                                 return ret;
7149                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
7150                         break;
7151                 case RTE_FLOW_ITEM_TYPE_MPLS:
7152                         ret = mlx5_flow_validate_item_mpls(dev, items,
7153                                                            item_flags,
7154                                                            last_item, error);
7155                         if (ret < 0)
7156                                 return ret;
7157                         last_item = MLX5_FLOW_LAYER_MPLS;
7158                         break;
7159
7160                 case RTE_FLOW_ITEM_TYPE_MARK:
7161                         ret = flow_dv_validate_item_mark(dev, items, attr,
7162                                                          error);
7163                         if (ret < 0)
7164                                 return ret;
7165                         last_item = MLX5_FLOW_ITEM_MARK;
7166                         break;
7167                 case RTE_FLOW_ITEM_TYPE_META:
7168                         ret = flow_dv_validate_item_meta(dev, items, attr,
7169                                                          error);
7170                         if (ret < 0)
7171                                 return ret;
7172                         last_item = MLX5_FLOW_ITEM_METADATA;
7173                         break;
7174                 case RTE_FLOW_ITEM_TYPE_ICMP:
7175                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
7176                                                            next_protocol,
7177                                                            error);
7178                         if (ret < 0)
7179                                 return ret;
7180                         last_item = MLX5_FLOW_LAYER_ICMP;
7181                         break;
7182                 case RTE_FLOW_ITEM_TYPE_ICMP6:
7183                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
7184                                                             next_protocol,
7185                                                             error);
7186                         if (ret < 0)
7187                                 return ret;
7188                         item_ipv6_proto = IPPROTO_ICMPV6;
7189                         last_item = MLX5_FLOW_LAYER_ICMP6;
7190                         break;
7191                 case RTE_FLOW_ITEM_TYPE_TAG:
7192                         ret = flow_dv_validate_item_tag(dev, items,
7193                                                         attr, error);
7194                         if (ret < 0)
7195                                 return ret;
7196                         last_item = MLX5_FLOW_ITEM_TAG;
7197                         break;
7198                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7199                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7200                         break;
7201                 case RTE_FLOW_ITEM_TYPE_GTP:
7202                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7203                                                         error);
7204                         if (ret < 0)
7205                                 return ret;
7206                         gtp_item = items;
7207                         last_item = MLX5_FLOW_LAYER_GTP;
7208                         break;
7209                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7210                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
7211                                                             gtp_item, attr,
7212                                                             error);
7213                         if (ret < 0)
7214                                 return ret;
7215                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
7216                         break;
7217                 case RTE_FLOW_ITEM_TYPE_ECPRI:
7218                         /* Capacity will be checked in the translate stage. */
7219                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7220                                                             last_item,
7221                                                             ether_type,
7222                                                             &nic_ecpri_mask,
7223                                                             error);
7224                         if (ret < 0)
7225                                 return ret;
7226                         last_item = MLX5_FLOW_LAYER_ECPRI;
7227                         break;
7228                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7229                         ret = flow_dv_validate_item_integrity(dev, items,
7230                                                               item_flags,
7231                                                               &last_item,
7232                                                               integrity_items,
7233                                                               error);
7234                         if (ret < 0)
7235                                 return ret;
7236                         break;
7237                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7238                         ret = flow_dv_validate_item_aso_ct(dev, items,
7239                                                            &item_flags, error);
7240                         if (ret < 0)
7241                                 return ret;
7242                         break;
7243                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7244                         /* The tunnel offload item was processed before;
7245                          * list it here as a supported type.
7246                          */
7247                         break;
7248                 case RTE_FLOW_ITEM_TYPE_FLEX:
7249                         ret = flow_dv_validate_item_flex(dev, items, item_flags,
7250                                                          &last_item,
7251                                                          tunnel != 0, error);
7252                         if (ret < 0)
7253                                 return ret;
7254                         break;
7255                 default:
7256                         return rte_flow_error_set(error, ENOTSUP,
7257                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7258                                                   NULL, "item not supported");
7259                 }
7260                 item_flags |= last_item;
7261         }
7262         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
7263                 ret = flow_dv_validate_item_integrity_post(integrity_items,
7264                                                            item_flags, error);
7265                 if (ret)
7266                         return ret;
7267         }
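        /*
         * Note (illustrative, not in the upstream sources): integrity items
         * are cross-checked only after the whole pattern has been scanned,
         * since the layers they refer to (e.g. an inner L4 header) may
         * appear later in the item list than the integrity item itself.
         */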
7268         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7269                 int type = actions->type;
7270                 bool shared_count = false;
7271
7272                 if (!mlx5_flow_os_action_supported(type))
7273                         return rte_flow_error_set(error, ENOTSUP,
7274                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7275                                                   actions,
7276                                                   "action not supported");
7277                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7278                         return rte_flow_error_set(error, ENOTSUP,
7279                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7280                                                   actions, "too many actions");
7281                 if (action_flags &
7282                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7283                         return rte_flow_error_set(error, ENOTSUP,
7284                                 RTE_FLOW_ERROR_TYPE_ACTION,
7285                                 NULL, "meter action with policy "
7286                                 "must be the last action");
7287                 switch (type) {
7288                 case RTE_FLOW_ACTION_TYPE_VOID:
7289                         break;
7290                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7291                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
7292                         ret = flow_dv_validate_action_port_id(dev,
7293                                                               action_flags,
7294                                                               actions,
7295                                                               attr,
7296                                                               error);
7297                         if (ret)
7298                                 return ret;
7299                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7300                         ++actions_n;
7301                         break;
7302                 case RTE_FLOW_ACTION_TYPE_FLAG:
7303                         ret = flow_dv_validate_action_flag(dev, action_flags,
7304                                                            attr, error);
7305                         if (ret < 0)
7306                                 return ret;
7307                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7308                                 /* Count all modify-header actions as one. */
7309                                 if (!(action_flags &
7310                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7311                                         ++actions_n;
7312                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7313                                                 MLX5_FLOW_ACTION_MARK_EXT;
7314                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7315                                         modify_after_mirror = 1;
7317                         } else {
7318                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7319                                 ++actions_n;
7320                         }
7321                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7322                         break;
7323                 case RTE_FLOW_ACTION_TYPE_MARK:
7324                         ret = flow_dv_validate_action_mark(dev, actions,
7325                                                            action_flags,
7326                                                            attr, error);
7327                         if (ret < 0)
7328                                 return ret;
7329                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7330                                 /* Count all modify-header actions as one. */
7331                                 if (!(action_flags &
7332                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7333                                         ++actions_n;
7334                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7335                                                 MLX5_FLOW_ACTION_MARK_EXT;
7336                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7337                                         modify_after_mirror = 1;
7338                         } else {
7339                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7340                                 ++actions_n;
7341                         }
7342                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7343                         break;
7344                 case RTE_FLOW_ACTION_TYPE_SET_META:
7345                         ret = flow_dv_validate_action_set_meta(dev, actions,
7346                                                                action_flags,
7347                                                                attr, error);
7348                         if (ret < 0)
7349                                 return ret;
7350                         /* Count all modify-header actions as one action. */
7351                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7352                                 ++actions_n;
7353                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7354                                 modify_after_mirror = 1;
7355                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7356                         rw_act_num += MLX5_ACT_NUM_SET_META;
7357                         break;
7358                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7359                         ret = flow_dv_validate_action_set_tag(dev, actions,
7360                                                               action_flags,
7361                                                               attr, error);
7362                         if (ret < 0)
7363                                 return ret;
7364                         /* Count all modify-header actions as one action. */
7365                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7366                                 ++actions_n;
7367                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7368                                 modify_after_mirror = 1;
7369                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7370                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7371                         break;
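                        /*
                         * Accounting sketch (illustrative only): all
                         * modify-header actions share a single actions_n
                         * slot, while each one still charges its own
                         * MLX5_ACT_NUM_* quota to rw_act_num. E.g. for
                         * SET_META followed by SET_TAG:
                         *
                         *	actions_n  += 1;  (first one only)
                         *	rw_act_num += MLX5_ACT_NUM_SET_META;
                         *	rw_act_num += MLX5_ACT_NUM_SET_TAG;
                         */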
7372                 case RTE_FLOW_ACTION_TYPE_DROP:
7373                         ret = mlx5_flow_validate_action_drop(action_flags,
7374                                                              attr, error);
7375                         if (ret < 0)
7376                                 return ret;
7377                         action_flags |= MLX5_FLOW_ACTION_DROP;
7378                         ++actions_n;
7379                         break;
7380                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7381                         ret = mlx5_flow_validate_action_queue(actions,
7382                                                               action_flags, dev,
7383                                                               attr, error);
7384                         if (ret < 0)
7385                                 return ret;
7386                         queue_index = ((const struct rte_flow_action_queue *)
7387                                                         (actions->conf))->index;
7388                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7389                         ++actions_n;
7390                         break;
7391                 case RTE_FLOW_ACTION_TYPE_RSS:
7392                         rss = actions->conf;
7393                         ret = mlx5_flow_validate_action_rss(actions,
7394                                                             action_flags, dev,
7395                                                             attr, item_flags,
7396                                                             error);
7397                         if (ret < 0)
7398                                 return ret;
7399                         if (rss && sample_rss &&
7400                             (sample_rss->level != rss->level ||
7401                             sample_rss->types != rss->types))
7402                                 return rte_flow_error_set(error, ENOTSUP,
7403                                         RTE_FLOW_ERROR_TYPE_ACTION,
7404                                         NULL,
7405                                         "Can't use different RSS types "
7406                                         "or levels in the same flow"
7407                         if (rss != NULL && rss->queue_num)
7408                                 queue_index = rss->queue[0];
7409                         action_flags |= MLX5_FLOW_ACTION_RSS;
7410                         ++actions_n;
7411                         break;
7412                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7413                         ret =
7414                         mlx5_flow_validate_action_default_miss(action_flags,
7415                                         attr, error);
7416                         if (ret < 0)
7417                                 return ret;
7418                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7419                         ++actions_n;
7420                         break;
7421                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7422                         shared_count = true;
7423                         /* fall-through. */
7424                 case RTE_FLOW_ACTION_TYPE_COUNT:
7425                         ret = flow_dv_validate_action_count(dev, shared_count,
7426                                                             action_flags,
7427                                                             error);
7428                         if (ret < 0)
7429                                 return ret;
7430                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7431                         ++actions_n;
7432                         break;
7433                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7434                         if (flow_dv_validate_action_pop_vlan(dev,
7435                                                              action_flags,
7436                                                              actions,
7437                                                              item_flags, attr,
7438                                                              error))
7439                                 return -rte_errno;
7440                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7441                                 modify_after_mirror = 1;
7442                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7443                         ++actions_n;
7444                         break;
7445                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7446                         ret = flow_dv_validate_action_push_vlan(dev,
7447                                                                 action_flags,
7448                                                                 vlan_m,
7449                                                                 actions, attr,
7450                                                                 error);
7451                         if (ret < 0)
7452                                 return ret;
7453                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7454                                 modify_after_mirror = 1;
7455                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7456                         ++actions_n;
7457                         break;
7458                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7459                         ret = flow_dv_validate_action_set_vlan_pcp
7460                                                 (action_flags, actions, error);
7461                         if (ret < 0)
7462                                 return ret;
7463                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7464                                 modify_after_mirror = 1;
7465                         /* Count PCP with push_vlan command. */
7466                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7467                         break;
7468                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7469                         ret = flow_dv_validate_action_set_vlan_vid
7470                                                 (item_flags, action_flags,
7471                                                  actions, error);
7472                         if (ret < 0)
7473                                 return ret;
7474                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7475                                 modify_after_mirror = 1;
7476                         /* Count VID with push_vlan command. */
7477                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7478                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7479                         break;
7480                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7481                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7482                         ret = flow_dv_validate_action_l2_encap(dev,
7483                                                                action_flags,
7484                                                                actions, attr,
7485                                                                error);
7486                         if (ret < 0)
7487                                 return ret;
7488                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7489                         ++actions_n;
7490                         break;
7491                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7492                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7493                         ret = flow_dv_validate_action_decap(dev, action_flags,
7494                                                             actions, item_flags,
7495                                                             attr, error);
7496                         if (ret < 0)
7497                                 return ret;
7498                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7499                                 modify_after_mirror = 1;
7500                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7501                         ++actions_n;
7502                         break;
7503                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7504                         ret = flow_dv_validate_action_raw_encap_decap
7505                                 (dev, NULL, actions->conf, attr, &action_flags,
7506                                  &actions_n, actions, item_flags, error);
7507                         if (ret < 0)
7508                                 return ret;
7509                         break;
7510                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7511                         decap = actions->conf;
7512                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7513                                 ;
7514                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7515                                 encap = NULL;
7516                                 actions--;
7517                         } else {
7518                                 encap = actions->conf;
7519                         }
7520                         ret = flow_dv_validate_action_raw_encap_decap
7521                                            (dev,
7522                                             decap ? decap : &empty_decap, encap,
7523                                             attr, &action_flags, &actions_n,
7524                                             actions, item_flags, error);
7525                         if (ret < 0)
7526                                 return ret;
7527                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7528                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7529                                 modify_after_mirror = 1;
7530                         break;
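                        /*
                         * Illustrative sketch (hypothetical, not part of the
                         * driver): the RAW_DECAP case above skips VOID
                         * actions to pair a following RAW_ENCAP with the
                         * decap, so an application-level list such as
                         *
                         *	const struct rte_flow_action acts[] = {
                         *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP },
                         *		{ .type = RTE_FLOW_ACTION_TYPE_VOID },
                         *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP },
                         *		{ .type = RTE_FLOW_ACTION_TYPE_END },
                         *	};
                         *
                         * is validated as one decap/encap pair; without the
                         * trailing RAW_ENCAP the iterator is rewound and the
                         * decap is checked against &empty_decap.
                         */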
7531                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7532                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7533                         ret = flow_dv_validate_action_modify_mac(action_flags,
7534                                                                  actions,
7535                                                                  item_flags,
7536                                                                  error);
7537                         if (ret < 0)
7538                                 return ret;
7539                         /* Count all modify-header actions as one action. */
7540                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7541                                 ++actions_n;
7542                         action_flags |= actions->type ==
7543                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7544                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7545                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7546                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7547                                 modify_after_mirror = 1;
7548                         /*
7549                          * Even though the source and destination MAC addresses
7550                          * overlap within 4B-aligned words in the header, the
7551                          * convert function handles them separately, so 4 SW
7552                          * actions are created. Two actions are added each time,
7553                          * no matter how many bytes of the address are set.
7554                          */
7555                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7556                         break;
7557                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7558                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7559                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7560                                                                   actions,
7561                                                                   item_flags,
7562                                                                   error);
7563                         if (ret < 0)
7564                                 return ret;
7565                         /* Count all modify-header actions as one action. */
7566                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7567                                 ++actions_n;
7568                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7569                                 modify_after_mirror = 1;
7570                         action_flags |= actions->type ==
7571                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7572                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7573                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7574                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7575                         break;
7576                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7577                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7578                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7579                                                                   actions,
7580                                                                   item_flags,
7581                                                                   error);
7582                         if (ret < 0)
7583                                 return ret;
7584                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7585                                 return rte_flow_error_set(error, ENOTSUP,
7586                                         RTE_FLOW_ERROR_TYPE_ACTION,
7587                                         actions,
7588                                         "Can't change header "
7589                                         "with ICMPv6 proto");
7590                         /* Count all modify-header actions as one action. */
7591                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7592                                 ++actions_n;
7593                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7594                                 modify_after_mirror = 1;
7595                         action_flags |= actions->type ==
7596                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7597                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7598                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7599                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7600                         break;
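                        /*
                         * Example (illustrative): a pattern matching ICMP6
                         * followed by a SET_IPV6_SRC/DST action is rejected
                         * above: changing the IPv6 addresses would invalidate
                         * the ICMPv6 checksum, which covers the IPv6
                         * pseudo-header. item_ipv6_proto is tracked while
                         * parsing the items for exactly this check.
                         */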
7601                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7602                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7603                         ret = flow_dv_validate_action_modify_tp(action_flags,
7604                                                                 actions,
7605                                                                 item_flags,
7606                                                                 error);
7607                         if (ret < 0)
7608                                 return ret;
7609                         /* Count all modify-header actions as one action. */
7610                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7611                                 ++actions_n;
7612                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7613                                 modify_after_mirror = 1;
7614                         action_flags |= actions->type ==
7615                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7616                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7617                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7618                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7619                         break;
7620                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7621                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7622                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7623                                                                  actions,
7624                                                                  item_flags,
7625                                                                  error);
7626                         if (ret < 0)
7627                                 return ret;
7628                         /* Count all modify-header actions as one action. */
7629                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7630                                 ++actions_n;
7631                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7632                                 modify_after_mirror = 1;
7633                         action_flags |= actions->type ==
7634                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7635                                                 MLX5_FLOW_ACTION_SET_TTL :
7636                                                 MLX5_FLOW_ACTION_DEC_TTL;
7637                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7638                         break;
7639                 case RTE_FLOW_ACTION_TYPE_JUMP:
7640                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7641                                                            action_flags,
7642                                                            attr, external,
7643                                                            error);
7644                         if (ret)
7645                                 return ret;
7646                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7647                             fdb_mirror_limit)
7648                                 return rte_flow_error_set(error, EINVAL,
7649                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7650                                                   NULL,
7651                                                   "sample and jump action combination is not supported");
7652                         ++actions_n;
7653                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7654                         break;
7655                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7656                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7657                         ret = flow_dv_validate_action_modify_tcp_seq
7658                                                                 (action_flags,
7659                                                                  actions,
7660                                                                  item_flags,
7661                                                                  error);
7662                         if (ret < 0)
7663                                 return ret;
7664                         /* Count all modify-header actions as one action. */
7665                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7666                                 ++actions_n;
7667                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7668                                 modify_after_mirror = 1;
7669                         action_flags |= actions->type ==
7670                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7671                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7672                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7673                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7674                         break;
7675                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7676                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7677                         ret = flow_dv_validate_action_modify_tcp_ack
7678                                                                 (action_flags,
7679                                                                  actions,
7680                                                                  item_flags,
7681                                                                  error);
7682                         if (ret < 0)
7683                                 return ret;
7684                         /* Count all modify-header actions as one action. */
7685                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7686                                 ++actions_n;
7687                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7688                                 modify_after_mirror = 1;
7689                         action_flags |= actions->type ==
7690                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7691                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7692                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7693                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7694                         break;
7695                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7696                         break;
7697                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7698                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7699                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7700                         break;
7701                 case RTE_FLOW_ACTION_TYPE_METER:
7702                         ret = mlx5_flow_validate_action_meter(dev,
7703                                                               action_flags,
7704                                                               item_flags,
7705                                                               actions, attr,
7706                                                               port_id_item,
7707                                                               &def_policy,
7708                                                               error);
7709                         if (ret < 0)
7710                                 return ret;
7711                         action_flags |= MLX5_FLOW_ACTION_METER;
7712                         if (!def_policy)
7713                                 action_flags |=
7714                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7715                         ++actions_n;
7716                         /* Meter action will add one more TAG action. */
7717                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7718                         break;
7719                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7720                         if (!attr->transfer && !attr->group)
7721                                 return rte_flow_error_set(error, ENOTSUP,
7722                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7723                                                                            NULL,
7724                           "Shared ASO age action is not supported for group 0");
7725                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7726                                 return rte_flow_error_set
7727                                                   (error, EINVAL,
7728                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7729                                                    NULL,
7730                                                    "duplicate age actions set");
7731                         action_flags |= MLX5_FLOW_ACTION_AGE;
7732                         ++actions_n;
7733                         break;
7734                 case RTE_FLOW_ACTION_TYPE_AGE:
7735                         ret = flow_dv_validate_action_age(action_flags,
7736                                                           actions, dev,
7737                                                           error);
7738                         if (ret < 0)
7739                                 return ret;
7740                         /*
7741                          * Validate that the regular AGE action (using a counter)
7742                          * is mutually exclusive with shared counter actions.
7743                          */
7744                         if (!priv->sh->flow_hit_aso_en) {
7745                                 if (shared_count)
7746                                         return rte_flow_error_set
7747                                                 (error, EINVAL,
7748                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7749                                                 NULL,
7750                                                 "old age and shared count combination is not supported");
7751                                 if (sample_count)
7752                                         return rte_flow_error_set
7753                                                 (error, EINVAL,
7754                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7755                                                 NULL,
7756                                                 "old age action and count must be in the same sub flow");
7757                         }
7758                         action_flags |= MLX5_FLOW_ACTION_AGE;
7759                         ++actions_n;
7760                         break;
7761                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7762                         ret = flow_dv_validate_action_modify_ipv4_dscp
7763                                                          (action_flags,
7764                                                           actions,
7765                                                           item_flags,
7766                                                           error);
7767                         if (ret < 0)
7768                                 return ret;
7769                         /* Count all modify-header actions as one action. */
7770                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7771                                 ++actions_n;
7772                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7773                                 modify_after_mirror = 1;
7774                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7775                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7776                         break;
7777                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7778                         ret = flow_dv_validate_action_modify_ipv6_dscp
7779                                                                 (action_flags,
7780                                                                  actions,
7781                                                                  item_flags,
7782                                                                  error);
7783                         if (ret < 0)
7784                                 return ret;
7785                         /* Count all modify-header actions as one action. */
7786                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7787                                 ++actions_n;
7788                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7789                                 modify_after_mirror = 1;
7790                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7791                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7792                         break;
7793                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7794                         ret = flow_dv_validate_action_sample(&action_flags,
7795                                                              actions, dev,
7796                                                              attr, item_flags,
7797                                                              rss, &sample_rss,
7798                                                              &sample_count,
7799                                                              &fdb_mirror_limit,
7800                                                              error);
7801                         if (ret < 0)
7802                                 return ret;
7803                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7804                         ++actions_n;
7805                         break;
7806                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7807                         ret = flow_dv_validate_action_modify_field(dev,
7808                                                                    action_flags,
7809                                                                    actions,
7810                                                                    attr,
7811                                                                    error);
7812                         if (ret < 0)
7813                                 return ret;
7814                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7815                                 modify_after_mirror = 1;
7816                         /* Count all modify-header actions as one action. */
7817                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7818                                 ++actions_n;
7819                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7820                         rw_act_num += ret;
7821                         break;
7822                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7823                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7824                                                              item_flags, attr,
7825                                                              error);
7826                         if (ret < 0)
7827                                 return ret;
7828                         action_flags |= MLX5_FLOW_ACTION_CT;
7829                         break;
7830                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7831                         /* The tunnel offload action was processed before;
7832                          * list it here as a supported type.
7833                          */
7834                         break;
7835                 default:
7836                         return rte_flow_error_set(error, ENOTSUP,
7837                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7838                                                   actions,
7839                                                   "action not supported");
7840                 }
7841         }
7842         /*
7843          * Validate actions in tunnel offload flow rules:
7844          * - An explicit decap action is prohibited by the tunnel offload API.
7845          * - A drop action in a tunnel steer rule is prohibited by the API.
7846          * - The application cannot use the MARK action because its value can
7847          *   mask the tunnel default miss notification.
7848          * - JUMP in a tunnel match rule is not supported by the current PMD
7849          *   implementation.
7850          * - TAG & META are reserved for future use.
7851          */
7852         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7853                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7854                                             MLX5_FLOW_ACTION_MARK     |
7855                                             MLX5_FLOW_ACTION_SET_TAG  |
7856                                             MLX5_FLOW_ACTION_SET_META |
7857                                             MLX5_FLOW_ACTION_DROP;
7858
7859                 if (action_flags & bad_actions_mask)
7860                         return rte_flow_error_set
7861                                         (error, EINVAL,
7862                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7863                                         "Invalid RTE action in tunnel "
7864                                         "set decap rule");
7865                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7866                         return rte_flow_error_set
7867                                         (error, EINVAL,
7868                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7869                                         "tunnel set decap rule must terminate "
7870                                         "with JUMP");
7871                 if (!attr->ingress)
7872                         return rte_flow_error_set
7873                                         (error, EINVAL,
7874                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7875                                         "tunnel flows for ingress traffic only");
7876         }
7877         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7878                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7879                                             MLX5_FLOW_ACTION_MARK    |
7880                                             MLX5_FLOW_ACTION_SET_TAG |
7881                                             MLX5_FLOW_ACTION_SET_META;
7882
7883                 if (action_flags & bad_actions_mask)
7884                         return rte_flow_error_set
7885                                         (error, EINVAL,
7886                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7887                                         "Invalid RTE action in tunnel "
7888                                         "set match rule");
7889         }
7890         /*
7891          * Validate the drop action mutual exclusion with other actions.
7892          * Drop action is mutually-exclusive with any other action, except for
7893          * Count action.
7894          * Drop action compatibility with tunnel offload was already validated.
7895          */
7896         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7897                             MLX5_FLOW_ACTION_TUNNEL_SET));
7898         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7899             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7900                 return rte_flow_error_set(error, EINVAL,
7901                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7902                                           "Drop action is mutually-exclusive "
7903                                           "with any other action, except for "
7904                                           "Count action");
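        /*
         * Example (illustrative): DROP + COUNT passes the check above since
         * masking out DROP and COUNT leaves no other bits set, while e.g.
         * DROP + QUEUE leaves MLX5_FLOW_ACTION_QUEUE set and is rejected.
         */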
7905         /* E-Switch has a few restrictions on using items and actions */
7906         if (attr->transfer) {
7907                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7908                     action_flags & MLX5_FLOW_ACTION_FLAG)
7909                         return rte_flow_error_set(error, ENOTSUP,
7910                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7911                                                   NULL,
7912                                                   "unsupported action FLAG");
7913                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7914                     action_flags & MLX5_FLOW_ACTION_MARK)
7915                         return rte_flow_error_set(error, ENOTSUP,
7916                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7917                                                   NULL,
7918                                                   "unsupported action MARK");
7919                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7920                         return rte_flow_error_set(error, ENOTSUP,
7921                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7922                                                   NULL,
7923                                                   "unsupported action QUEUE");
7924                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7925                         return rte_flow_error_set(error, ENOTSUP,
7926                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7927                                                   NULL,
7928                                                   "unsupported action RSS");
7929                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7930                         return rte_flow_error_set(error, EINVAL,
7931                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7932                                                   actions,
7933                                                   "no fate action is found");
7934         } else {
7935                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7936                         return rte_flow_error_set(error, EINVAL,
7937                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7938                                                   actions,
7939                                                   "no fate action is found");
7940         }
7941         /*
7942          * Continue validation for Xcap and VLAN actions.
7943          * If hairpin is working in explicit TX rule mode, there is no action
7944          * splitting, and the validation of a hairpin ingress flow should be
7945          * the same as for other standard flows.
7946          */
7947         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7948                              MLX5_FLOW_VLAN_ACTIONS)) &&
7949             (queue_index == 0xFFFF ||
7950              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7951              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7952              conf->tx_explicit != 0))) {
7953                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7954                     MLX5_FLOW_XCAP_ACTIONS)
7955                         return rte_flow_error_set(error, ENOTSUP,
7956                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7957                                                   NULL, "encap and decap "
7958                                                   "combination is not supported");
7959                 if (!attr->transfer && attr->ingress) {
7960                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7961                                 return rte_flow_error_set
7962                                                 (error, ENOTSUP,
7963                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7964                                                  NULL, "encap is not supported"
7965                                                  " for ingress traffic");
7966                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7967                                 return rte_flow_error_set
7968                                                 (error, ENOTSUP,
7969                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7970                                                  NULL, "push VLAN action not "
7971                                                  "supported for ingress");
7972                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7973                                         MLX5_FLOW_VLAN_ACTIONS)
7974                                 return rte_flow_error_set
7975                                                 (error, ENOTSUP,
7976                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7977                                                  NULL, "no support for "
7978                                                  "multiple VLAN actions");
7979                 }
7980         }
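        /*
         * Example (illustrative): for a standard (non-hairpin-implicit)
         * flow, requesting both halves of MLX5_FLOW_XCAP_ACTIONS in one
         * rule, e.g. VXLAN_DECAP together with VXLAN_ENCAP, fails the
         * check above with "encap and decap combination is not supported".
         */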
7981         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7982                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7983                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7984                         attr->ingress)
7985                         return rte_flow_error_set
7986                                 (error, ENOTSUP,
7987                                 RTE_FLOW_ERROR_TYPE_ACTION,
7988                                 NULL, "fate action not supported for "
7989                                 "meter with policy");
7990                 if (attr->egress) {
7991                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7992                                 return rte_flow_error_set
7993                                         (error, ENOTSUP,
7994                                         RTE_FLOW_ERROR_TYPE_ACTION,
7995                                         NULL, "modify header action in egress "
7996                                         "cannot be done before meter action");
7997                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7998                                 return rte_flow_error_set
7999                                         (error, ENOTSUP,
8000                                         RTE_FLOW_ERROR_TYPE_ACTION,
8001                                         NULL, "encap action in egress "
8002                                         "cannot be done before meter action");
8003                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
8004                                 return rte_flow_error_set
8005                                         (error, ENOTSUP,
8006                                         RTE_FLOW_ERROR_TYPE_ACTION,
8007                                         NULL, "push vlan action in egress "
8008                                         "cannot be done before meter action");
8009                 }
8010         }
8011         /*
8012          * Hairpin flow will add one more TAG action in TX implicit mode.
8013          * In TX explicit mode, there will be no hairpin flow ID.
8014          */
8015         if (hairpin > 0)
8016                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8017         /* Extra metadata enabled: one more TAG action will be added. */
8018         if (dev_conf->dv_flow_en &&
8019             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
8020             mlx5_flow_ext_mreg_supported(dev))
8021                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8022         if (rw_act_num >
8023                         flow_dv_modify_hdr_action_max(dev, is_root)) {
8024                 return rte_flow_error_set(error, ENOTSUP,
8025                                           RTE_FLOW_ERROR_TYPE_ACTION,
8026                                           NULL, "too many header modify"
8027                                           " actions to support");
8028         }
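        /*
         * Accounting example (illustrative): a hairpin rule with extended
         * metadata enabled already carries two implicit MLX5_ACT_NUM_SET_TAG
         * quotas from the additions above, so any explicit modify-header
         * actions must fit in the remaining budget returned by
         * flow_dv_modify_hdr_action_max(dev, is_root).
         */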
8029         /* E-Switch egress mirror and modify flows have a limitation on CX5 */
8030         if (fdb_mirror_limit && modify_after_mirror)
8031                 return rte_flow_error_set(error, EINVAL,
8032                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8033                                 "sample before modify action is not supported");
8034         return 0;
8035 }
8036
8037 /**
8038  * Internal preparation function. Allocates the DV flow structure;
8039  * its size is constant.
8040  *
8041  * @param[in] dev
8042  *   Pointer to the rte_eth_dev structure.
8043  * @param[in] attr
8044  *   Pointer to the flow attributes.
8045  * @param[in] items
8046  *   Pointer to the list of items.
8047  * @param[in] actions
8048  *   Pointer to the list of actions.
8049  * @param[out] error
8050  *   Pointer to the error structure.
8051  *
8052  * @return
8053  *   Pointer to mlx5_flow object on success,
8054  *   otherwise NULL and rte_errno is set.
8055  */
8056 static struct mlx5_flow *
8057 flow_dv_prepare(struct rte_eth_dev *dev,
8058                 const struct rte_flow_attr *attr __rte_unused,
8059                 const struct rte_flow_item items[] __rte_unused,
8060                 const struct rte_flow_action actions[] __rte_unused,
8061                 struct rte_flow_error *error)
8062 {
8063         uint32_t handle_idx = 0;
8064         struct mlx5_flow *dev_flow;
8065         struct mlx5_flow_handle *dev_handle;
8066         struct mlx5_priv *priv = dev->data->dev_private;
8067         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8068
8069         MLX5_ASSERT(wks);
8070         wks->skip_matcher_reg = 0;
8071         wks->policy = NULL;
8072         wks->final_policy = NULL;
8073         /* Guard against overrunning the per-thread flow array. */
8074         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
8075                 rte_flow_error_set(error, ENOSPC,
8076                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8077                                    "no free temporary device flow");
8078                 return NULL;
8079         }
8080         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
8081                                    &handle_idx);
8082         if (!dev_handle) {
8083                 rte_flow_error_set(error, ENOMEM,
8084                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8085                                    "not enough memory to create flow handle");
8086                 return NULL;
8087         }
8088         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
8089         dev_flow = &wks->flows[wks->flow_idx++];
8090         memset(dev_flow, 0, sizeof(*dev_flow));
8091         dev_flow->handle = dev_handle;
8092         dev_flow->handle_idx = handle_idx;
8093         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
8094         dev_flow->ingress = attr->ingress;
8095         dev_flow->dv.transfer = attr->transfer;
8096         return dev_flow;
8097 }
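
/*
 * A minimal sketch of the bounded per-thread workspace pattern used by
 * flow_dv_prepare() above. The names below are illustrative only and do
 * not exist in the driver; kept under "#if 0" as documentation.
 */
#if 0
struct example_flow_workspace {
	struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
	uint32_t flow_idx;
};

static struct mlx5_flow *
example_take_flow(struct example_flow_workspace *wks)
{
	/* Refuse to hand out a slot beyond the end of the array. */
	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS)
		return NULL;
	return &wks->flows[wks->flow_idx++];
}
#endif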
8098
8099 #ifdef RTE_LIBRTE_MLX5_DEBUG
8100 /**
8101  * Sanity check for match mask and value. Similar to check_valid_spec() in
8102  * the kernel driver. If an unmasked bit is set in the value, it fails.
8103  *
8104  * @param match_mask
8105  *   Pointer to the match mask buffer.
8106  * @param match_value
8107  *   Pointer to the match value buffer.
8108  *
8109  * @return
8110  *   0 if valid, -EINVAL otherwise.
8111  */
8112 static int
8113 flow_dv_check_valid_spec(void *match_mask, void *match_value)
8114 {
8115         uint8_t *m = match_mask;
8116         uint8_t *v = match_value;
8117         unsigned int i;
8118
8119         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
8120                 if (v[i] & ~m[i]) {
8121                         DRV_LOG(ERR,
8122                                 "match_value differs from match_criteria"
8123                                 " %p[%u] != %p[%u]",
8124                                 match_value, i, match_mask, i);
8125                         return -EINVAL;
8126                 }
8127         }
8128         return 0;
8129 }
8130 #endif
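
/*
 * A worked example of the invariant enforced above, with illustrative
 * byte values: every bit set in the value must also be set in the mask.
 */
#if 0
static void
example_check_valid_spec(void)
{
	uint8_t mask = 0xf0;
	uint8_t value = 0x1f;

	/* 0x1f & ~0xf0 == 0x0f, so this value/mask pair is rejected. */
	MLX5_ASSERT((value & ~mask) == 0x0f);
}
#endif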
8131
8132 /**
8133  * Add match of ip_version.
8134  *
8135  * @param[in] group
8136  *   Flow group.
8137  * @param[in] headers_v
8138  *   Values header pointer.
8139  * @param[in] headers_m
8140  *   Masks header pointer.
8141  * @param[in] ip_version
8142  *   The IP version to set.
8143  */
8144 static inline void
8145 flow_dv_set_match_ip_version(uint32_t group,
8146                              void *headers_v,
8147                              void *headers_m,
8148                              uint8_t ip_version)
8149 {
8150         if (group == 0)
8151                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8152         else
8153                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8154                          ip_version);
8155         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8156         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8157         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8158 }
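
/*
 * A minimal usage sketch for the helper above (illustrative only): on the
 * root table (group 0) the mask is widened to 0xf so the exact IP version
 * is matched, while on other tables the version value doubles as the mask.
 */
#if 0
static void
example_ip_version_root_table(void)
{
	uint8_t matcher_buf[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };
	uint8_t key_buf[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };
	void *hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher_buf,
				    outer_headers);
	void *hdrs_v = MLX5_ADDR_OF(fte_match_param, key_buf, outer_headers);

	/* Root table: the ip_version mask becomes 0xf, the value stays 4. */
	flow_dv_set_match_ip_version(0, hdrs_v, hdrs_m, 4);
}
#endif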
8159
8160 /**
8161  * Add Ethernet item to matcher and to the value.
8162  *
8163  * @param[in, out] matcher
8164  *   Flow matcher.
8165  * @param[in, out] key
8166  *   Flow matcher value.
8167  * @param[in] item
8168  *   Flow pattern to translate.
8169  * @param[in] inner
8170  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
8171  */
8172 static void
8173 flow_dv_translate_item_eth(void *matcher, void *key,
8174                            const struct rte_flow_item *item, int inner,
8175                            uint32_t group)
8176 {
8177         const struct rte_flow_item_eth *eth_m = item->mask;
8178         const struct rte_flow_item_eth *eth_v = item->spec;
8179         const struct rte_flow_item_eth nic_mask = {
8180                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8181                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8182                 .type = RTE_BE16(0xffff),
8183                 .has_vlan = 0,
8184         };
8185         void *hdrs_m;
8186         void *hdrs_v;
8187         char *l24_v;
8188         unsigned int i;
8189
8190         if (!eth_v)
8191                 return;
8192         if (!eth_m)
8193                 eth_m = &nic_mask;
8194         if (inner) {
8195                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8196                                          inner_headers);
8197                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8198         } else {
8199                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8200                                          outer_headers);
8201                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8202         }
8203         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8204                &eth_m->dst, sizeof(eth_m->dst));
8205         /* The value must be in the range of the mask. */
8206         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8207         for (i = 0; i < sizeof(eth_m->dst); ++i)
8208                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8209         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8210                &eth_m->src, sizeof(eth_m->src));
8211         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8212         /* The value must be in the range of the mask. */
8213         for (i = 0; i < sizeof(eth_m->src); ++i)
8214                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8215         /*
8216          * HW supports match on one Ethertype, the Ethertype following the last
8217          * VLAN tag of the packet (see PRM).
8218          * Set match on ethertype only if ETH header is not followed by VLAN.
8219          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8220          * ethertype, and use ip_version field instead.
8221          * eCPRI over Ether layer will use type value 0xAEFE.
8222          */
8223         if (eth_m->type == 0xFFFF) {
8224                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
8225                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8226                 switch (eth_v->type) {
8227                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8228                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8229                         return;
8230                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8231                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8232                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8233                         return;
8234                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8235                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8236                         return;
8237                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8238                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8239                         return;
8240                 default:
8241                         break;
8242                 }
8243         }
8244         if (eth_m->has_vlan) {
8245                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8246                 if (eth_v->has_vlan) {
8247                         /*
8248                          * Here, when the has_more_vlan field in the VLAN item
8249                          * is also not set, only single-tagged packets match.
8250                          */
8251                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8252                         return;
8253                 }
8254         }
8255         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8256                  rte_be_to_cpu_16(eth_m->type));
8257         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8258         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8259 }
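
/*
 * A minimal usage sketch for the translator above (illustrative only):
 * a fully masked IPv4 ethertype is folded into an ip_version match
 * rather than an ethertype match, per the PRM note in the code.
 */
#if 0
static void
example_eth_ip_version_match(void)
{
	uint8_t matcher_buf[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };
	uint8_t key_buf[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };
	const struct rte_flow_item_eth eth_spec = {
		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
	};
	const struct rte_flow_item_eth eth_mask = {
		.type = RTE_BE16(0xffff),
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &eth_spec,
		.mask = &eth_mask,
	};

	flow_dv_translate_item_eth(matcher_buf, key_buf, &item,
				   0 /* outer */, 1 /* non-root group */);
}
#endif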
8260
8261 /**
8262  * Add VLAN item to matcher and to the value.
8263  *
8264  * @param[in, out] dev_flow
8265  *   Flow descriptor.
8266  * @param[in, out] matcher
8267  *   Flow matcher.
8268  * @param[in, out] key
8269  *   Flow matcher value.
8270  * @param[in] item
8271  *   Flow pattern to translate.
8272  * @param[in] inner
8273  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
8274  */
8275 static void
8276 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8277                             void *matcher, void *key,
8278                             const struct rte_flow_item *item,
8279                             int inner, uint32_t group)
8280 {
8281         const struct rte_flow_item_vlan *vlan_m = item->mask;
8282         const struct rte_flow_item_vlan *vlan_v = item->spec;
8283         void *hdrs_m;
8284         void *hdrs_v;
8285         uint16_t tci_m;
8286         uint16_t tci_v;
8287
8288         if (inner) {
8289                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8290                                          inner_headers);
8291                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8292         } else {
8293                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8294                                          outer_headers);
8295                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8296                 /*
8297                  * This is a workaround; masks are not supported here
8298                  * and have been pre-validated.
8299                  */
8300                 if (vlan_v)
8301                         dev_flow->handle->vf_vlan.tag =
8302                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8303         }
8304         /*
8305          * When VLAN item exists in flow, mark packet as tagged,
8306          * even if TCI is not specified.
8307          */
8308         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8309                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8310                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8311         }
8312         if (!vlan_v)
8313                 return;
8314         if (!vlan_m)
8315                 vlan_m = &rte_flow_item_vlan_mask;
8316         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8317         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8318         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8319         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8320         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8321         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8322         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8323         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8324         /*
8325          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8326          * ethertype, and use ip_version field instead.
8327          */
8328         if (vlan_m->inner_type == 0xFFFF) {
8329                 switch (vlan_v->inner_type) {
8330                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8331                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8332                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8333                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8334                         return;
8335                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8336                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8337                         return;
8338                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8339                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8340                         return;
8341                 default:
8342                         break;
8343                 }
8344         }
8345         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8346                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8347                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8348                 /* Only one vlan_tag bit can be set. */
8349                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8350                 return;
8351         }
8352         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8353                  rte_be_to_cpu_16(vlan_m->inner_type));
8354         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8355                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8356 }
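
/*
 * A worked example of the TCI decomposition above (illustrative value):
 * TCI 0xe07b carries PCP 7 (bits 15-13), CFI/DEI 0 (bit 12) and
 * VID 0x07b (bits 11-0).
 */
#if 0
static void
example_vlan_tci_split(void)
{
	uint16_t tci = 0xe07b;

	MLX5_ASSERT((tci & 0x0fff) == 0x07b);	/* first_vid */
	MLX5_ASSERT(((tci >> 12) & 0x1) == 0);	/* first_cfi */
	MLX5_ASSERT((tci >> 13) == 7);		/* first_prio */
}
#endif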
8357
8358 /**
8359  * Add IPV4 item to matcher and to the value.
8360  *
8361  * @param[in, out] matcher
8362  *   Flow matcher.
8363  * @param[in, out] key
8364  *   Flow matcher value.
8365  * @param[in] item
8366  *   Flow pattern to translate.
8367  * @param[in] inner
8368  *   Item is inner pattern.
8369  * @param[in] group
8370  *   The group to insert the rule.
8371  */
8372 static void
8373 flow_dv_translate_item_ipv4(void *matcher, void *key,
8374                             const struct rte_flow_item *item,
8375                             int inner, uint32_t group)
8376 {
8377         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8378         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8379         const struct rte_flow_item_ipv4 nic_mask = {
8380                 .hdr = {
8381                         .src_addr = RTE_BE32(0xffffffff),
8382                         .dst_addr = RTE_BE32(0xffffffff),
8383                         .type_of_service = 0xff,
8384                         .next_proto_id = 0xff,
8385                         .time_to_live = 0xff,
8386                 },
8387         };
8388         void *headers_m;
8389         void *headers_v;
8390         char *l24_m;
8391         char *l24_v;
8392         uint8_t tos, ihl_m, ihl_v;
8393
8394         if (inner) {
8395                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8396                                          inner_headers);
8397                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8398         } else {
8399                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8400                                          outer_headers);
8401                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8402         }
8403         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8404         if (!ipv4_v)
8405                 return;
8406         if (!ipv4_m)
8407                 ipv4_m = &nic_mask;
8408         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8409                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8410         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8411                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8412         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8413         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8414         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8415                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
8416         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8417                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
8418         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8419         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8420         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8421         ihl_m = ipv4_m->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8422         ihl_v = ipv4_v->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8423         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_ihl, ihl_m);
8424         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, ihl_m & ihl_v);
8425         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8426                  ipv4_m->hdr.type_of_service);
8427         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8428         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8429                  ipv4_m->hdr.type_of_service >> 2);
8430         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8431         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8432                  ipv4_m->hdr.next_proto_id);
8433         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8434                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8435         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8436                  ipv4_m->hdr.time_to_live);
8437         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8438                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8439         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8440                  !!(ipv4_m->hdr.fragment_offset));
8441         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8442                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8443 }
8444
8445 /**
8446  * Add IPV6 item to matcher and to the value.
8447  *
8448  * @param[in, out] matcher
8449  *   Flow matcher.
8450  * @param[in, out] key
8451  *   Flow matcher value.
8452  * @param[in] item
8453  *   Flow pattern to translate.
8454  * @param[in] inner
8455  *   Item is inner pattern.
8456  * @param[in] group
8457  *   The group to insert the rule.
8458  */
8459 static void
8460 flow_dv_translate_item_ipv6(void *matcher, void *key,
8461                             const struct rte_flow_item *item,
8462                             int inner, uint32_t group)
8463 {
8464         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8465         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8466         const struct rte_flow_item_ipv6 nic_mask = {
8467                 .hdr = {
8468                         .src_addr =
8469                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8470                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8471                         .dst_addr =
8472                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8473                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8474                         .vtc_flow = RTE_BE32(0xffffffff),
8475                         .proto = 0xff,
8476                         .hop_limits = 0xff,
8477                 },
8478         };
8479         void *headers_m;
8480         void *headers_v;
8481         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8482         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8483         char *l24_m;
8484         char *l24_v;
8485         uint32_t vtc_m;
8486         uint32_t vtc_v;
8487         int i;
8488         int size;
8489
8490         if (inner) {
8491                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8492                                          inner_headers);
8493                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8494         } else {
8495                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8496                                          outer_headers);
8497                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8498         }
8499         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8500         if (!ipv6_v)
8501                 return;
8502         if (!ipv6_m)
8503                 ipv6_m = &nic_mask;
8504         size = sizeof(ipv6_m->hdr.dst_addr);
8505         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8506                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8507         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8508                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8509         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8510         for (i = 0; i < size; ++i)
8511                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8512         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8513                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8514         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8515                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8516         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8517         for (i = 0; i < size; ++i)
8518                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8519         /* TOS. */
8520         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8521         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8522         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8523         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8524         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8525         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8526         /* Label. */
8527         if (inner) {
8528                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8529                          vtc_m);
8530                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8531                          vtc_v);
8532         } else {
8533                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8534                          vtc_m);
8535                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8536                          vtc_v);
8537         }
8538         /* Protocol. */
8539         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8540                  ipv6_m->hdr.proto);
8541         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8542                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8543         /* Hop limit. */
8544         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8545                  ipv6_m->hdr.hop_limits);
8546         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8547                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8548         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8549                  !!(ipv6_m->has_frag_ext));
8550         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8551                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8552 }
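
/*
 * A worked example of the vtc_flow decomposition above (illustrative
 * value): vtc_flow packs version (bits 31-28), traffic class (27-20,
 * i.e. DSCP then ECN) and the flow label (19-0).
 */
#if 0
static void
example_ipv6_vtc_flow_split(void)
{
	/* Version 6, DSCP 0x2e, ECN 0x1, flow label 0x12345. */
	uint32_t vtc = (6u << 28) | (0x2eu << 22) | (0x1u << 20) | 0x12345u;

	MLX5_ASSERT(((vtc >> 20) & 0x3) == 0x1);	/* ip_ecn */
	MLX5_ASSERT(((vtc >> 22) & 0x3f) == 0x2e);	/* ip_dscp */
	MLX5_ASSERT((vtc & 0xfffff) == 0x12345);	/* flow label */
}
#endif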
8553
8554 /**
8555  * Add IPV6 fragment extension item to matcher and to the value.
8556  *
8557  * @param[in, out] matcher
8558  *   Flow matcher.
8559  * @param[in, out] key
8560  *   Flow matcher value.
8561  * @param[in] item
8562  *   Flow pattern to translate.
8563  * @param[in] inner
8564  *   Item is inner pattern.
8565  */
8566 static void
8567 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8568                                      const struct rte_flow_item *item,
8569                                      int inner)
8570 {
8571         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8572         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8573         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8574                 .hdr = {
8575                         .next_header = 0xff,
8576                         .frag_data = RTE_BE16(0xffff),
8577                 },
8578         };
8579         void *headers_m;
8580         void *headers_v;
8581
8582         if (inner) {
8583                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8584                                          inner_headers);
8585                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8586         } else {
8587                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8588                                          outer_headers);
8589                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8590         }
8591         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8592         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8593         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8594         if (!ipv6_frag_ext_v)
8595                 return;
8596         if (!ipv6_frag_ext_m)
8597                 ipv6_frag_ext_m = &nic_mask;
8598         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8599                  ipv6_frag_ext_m->hdr.next_header);
8600         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8601                  ipv6_frag_ext_v->hdr.next_header &
8602                  ipv6_frag_ext_m->hdr.next_header);
8603 }
8604
8605 /**
8606  * Add TCP item to matcher and to the value.
8607  *
8608  * @param[in, out] matcher
8609  *   Flow matcher.
8610  * @param[in, out] key
8611  *   Flow matcher value.
8612  * @param[in] item
8613  *   Flow pattern to translate.
8614  * @param[in] inner
8615  *   Item is inner pattern.
8616  */
8617 static void
8618 flow_dv_translate_item_tcp(void *matcher, void *key,
8619                            const struct rte_flow_item *item,
8620                            int inner)
8621 {
8622         const struct rte_flow_item_tcp *tcp_m = item->mask;
8623         const struct rte_flow_item_tcp *tcp_v = item->spec;
8624         void *headers_m;
8625         void *headers_v;
8626
8627         if (inner) {
8628                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8629                                          inner_headers);
8630                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8631         } else {
8632                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8633                                          outer_headers);
8634                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8635         }
8636         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8637         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8638         if (!tcp_v)
8639                 return;
8640         if (!tcp_m)
8641                 tcp_m = &rte_flow_item_tcp_mask;
8642         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8643                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8644         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8645                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8646         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8647                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8648         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8649                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8650         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8651                  tcp_m->hdr.tcp_flags);
8652         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8653                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8654 }
8655
8656 /**
8657  * Add UDP item to matcher and to the value.
8658  *
8659  * @param[in, out] matcher
8660  *   Flow matcher.
8661  * @param[in, out] key
8662  *   Flow matcher value.
8663  * @param[in] item
8664  *   Flow pattern to translate.
8665  * @param[in] inner
8666  *   Item is inner pattern.
8667  */
8668 static void
8669 flow_dv_translate_item_udp(void *matcher, void *key,
8670                            const struct rte_flow_item *item,
8671                            int inner)
8672 {
8673         const struct rte_flow_item_udp *udp_m = item->mask;
8674         const struct rte_flow_item_udp *udp_v = item->spec;
8675         void *headers_m;
8676         void *headers_v;
8677
8678         if (inner) {
8679                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8680                                          inner_headers);
8681                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8682         } else {
8683                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8684                                          outer_headers);
8685                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8686         }
8687         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8688         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8689         if (!udp_v)
8690                 return;
8691         if (!udp_m)
8692                 udp_m = &rte_flow_item_udp_mask;
8693         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8694                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8695         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8696                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8697         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8698                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8699         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8700                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8701 }
8702
8703 /**
8704  * Add GRE optional Key item to matcher and to the value.
8705  *
8706  * @param[in, out] matcher
8707  *   Flow matcher.
8708  * @param[in, out] key
8709  *   Flow matcher value.
8710  * @param[in] item
8711  *   Flow pattern to translate.
8714  */
8715 static void
8716 flow_dv_translate_item_gre_key(void *matcher, void *key,
8717                                    const struct rte_flow_item *item)
8718 {
8719         const rte_be32_t *key_m = item->mask;
8720         const rte_be32_t *key_v = item->spec;
8721         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8722         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8723         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8724
8725         /* GRE K bit must be on and should already be validated */
8726         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8727         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8728         if (!key_v)
8729                 return;
8730         if (!key_m)
8731                 key_m = &gre_key_default_mask;
8732         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8733                  rte_be_to_cpu_32(*key_m) >> 8);
8734         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8735                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8736         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8737                  rte_be_to_cpu_32(*key_m) & 0xFF);
8738         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8739                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8740 }
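
/*
 * A worked example of the GRE key split above (illustrative value): the
 * 32-bit key is matched as a 24-bit high part and an 8-bit low part.
 */
#if 0
static void
example_gre_key_split(void)
{
	uint32_t gre_key = 0x12345678;

	MLX5_ASSERT((gre_key >> 8) == 0x123456);	/* gre_key_h */
	MLX5_ASSERT((gre_key & 0xff) == 0x78);		/* gre_key_l */
}
#endif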
8741
8742 /**
8743  * Add GRE item to matcher and to the value.
8744  *
8745  * @param[in, out] matcher
8746  *   Flow matcher.
8747  * @param[in, out] key
8748  *   Flow matcher value.
8749  * @param[in] item
8750  *   Flow pattern to translate.
8751  * @param[in] pattern_flags
8752  *   Accumulated pattern flags.
8753  */
8754 static void
8755 flow_dv_translate_item_gre(void *matcher, void *key,
8756                            const struct rte_flow_item *item,
8757                            uint64_t pattern_flags)
8758 {
8759         static const struct rte_flow_item_gre empty_gre = {0,};
8760         const struct rte_flow_item_gre *gre_m = item->mask;
8761         const struct rte_flow_item_gre *gre_v = item->spec;
8762         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
8763         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8764         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8765         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8766         struct {
8767                 union {
8768                         __extension__
8769                         struct {
8770                                 uint16_t version:3;
8771                                 uint16_t rsvd0:9;
8772                                 uint16_t s_present:1;
8773                                 uint16_t k_present:1;
8774                                 uint16_t rsvd_bit1:1;
8775                                 uint16_t c_present:1;
8776                         };
8777                         uint16_t value;
8778                 };
8779         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8780         uint16_t protocol_m, protocol_v;
8781
8782         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8783         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8784         if (!gre_v) {
8785                 gre_v = &empty_gre;
8786                 gre_m = &empty_gre;
8787         } else {
8788                 if (!gre_m)
8789                         gre_m = &rte_flow_item_gre_mask;
8790         }
8791         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8792         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8793         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8794                  gre_crks_rsvd0_ver_m.c_present);
8795         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8796                  gre_crks_rsvd0_ver_v.c_present &
8797                  gre_crks_rsvd0_ver_m.c_present);
8798         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8799                  gre_crks_rsvd0_ver_m.k_present);
8800         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8801                  gre_crks_rsvd0_ver_v.k_present &
8802                  gre_crks_rsvd0_ver_m.k_present);
8803         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8804                  gre_crks_rsvd0_ver_m.s_present);
8805         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8806                  gre_crks_rsvd0_ver_v.s_present &
8807                  gre_crks_rsvd0_ver_m.s_present);
8808         protocol_m = rte_be_to_cpu_16(gre_m->protocol);
8809         protocol_v = rte_be_to_cpu_16(gre_v->protocol);
8810         if (!protocol_m) {
8811                 /* Force next protocol to prevent matcher duplication. */
8812                 protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
8813                 if (protocol_v)
8814                         protocol_m = 0xFFFF;
8815         }
8816         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
8817         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8818                  protocol_m & protocol_v);
8819 }
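
/*
 * A worked example of the c_rsvd0_ver decoding above (illustrative value,
 * assuming the little-endian bit-field layout the union relies on):
 * 0x2000 in CPU order has only bit 13 set, i.e. k_present = 1 while
 * c_present and s_present are 0.
 */
#if 0
static void
example_gre_flags_split(void)
{
	uint16_t v = 0x2000;

	MLX5_ASSERT(((v >> 13) & 0x1) == 1);	/* k_present */
	MLX5_ASSERT(((v >> 15) & 0x1) == 0);	/* c_present */
	MLX5_ASSERT(((v >> 12) & 0x1) == 0);	/* s_present */
}
#endif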
8820
8821 /**
8822  * Add NVGRE item to matcher and to the value.
8823  *
8824  * @param[in, out] matcher
8825  *   Flow matcher.
8826  * @param[in, out] key
8827  *   Flow matcher value.
8828  * @param[in] item
8829  *   Flow pattern to translate.
8830  * @param[in] pattern_flags
8831  *   Accumulated pattern flags.
8832  */
8833 static void
8834 flow_dv_translate_item_nvgre(void *matcher, void *key,
8835                              const struct rte_flow_item *item,
8836                              uint64_t pattern_flags)
8837 {
8838         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8839         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8840         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8841         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8842         const char *tni_flow_id_m;
8843         const char *tni_flow_id_v;
8844         char *gre_key_m;
8845         char *gre_key_v;
8846         int size;
8847         int i;
8848
8849         /* For NVGRE, GRE header fields must be set with defined values. */
8850         const struct rte_flow_item_gre gre_spec = {
8851                 .c_rsvd0_ver = RTE_BE16(0x2000),
8852                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8853         };
8854         const struct rte_flow_item_gre gre_mask = {
8855                 .c_rsvd0_ver = RTE_BE16(0xB000),
8856                 .protocol = RTE_BE16(UINT16_MAX),
8857         };
8858         const struct rte_flow_item gre_item = {
8859                 .spec = &gre_spec,
8860                 .mask = &gre_mask,
8861                 .last = NULL,
8862         };
8863         flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags);
8864         if (!nvgre_v)
8865                 return;
8866         if (!nvgre_m)
8867                 nvgre_m = &rte_flow_item_nvgre_mask;
8868         tni_flow_id_m = (const char *)nvgre_m->tni;
8869         tni_flow_id_v = (const char *)nvgre_v->tni;
8870         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8871         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8872         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8873         memcpy(gre_key_m, tni_flow_id_m, size);
8874         for (i = 0; i < size; ++i)
8875                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8876 }
8877
8878 /**
8879  * Add VXLAN item to matcher and to the value.
8880  *
8881  * @param[in] dev
8882  *   Pointer to the Ethernet device structure.
8883  * @param[in] attr
8884  *   Flow rule attributes.
8885  * @param[in, out] matcher
8886  *   Flow matcher.
8887  * @param[in, out] key
8888  *   Flow matcher value.
8889  * @param[in] item
8890  *   Flow pattern to translate.
8891  * @param[in] inner
8892  *   Item is inner pattern.
8893  */
8894 static void
8895 flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
8896                              const struct rte_flow_attr *attr,
8897                              void *matcher, void *key,
8898                              const struct rte_flow_item *item,
8899                              int inner)
8900 {
8901         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8902         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8903         void *headers_m;
8904         void *headers_v;
8905         void *misc5_m;
8906         void *misc5_v;
8907         uint32_t *tunnel_header_v;
8908         uint32_t *tunnel_header_m;
8909         uint16_t dport;
8910         struct mlx5_priv *priv = dev->data->dev_private;
8911         const struct rte_flow_item_vxlan nic_mask = {
8912                 .vni = "\xff\xff\xff",
8913                 .rsvd1 = 0xff,
8914         };
8915
8916         if (inner) {
8917                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8918                                          inner_headers);
8919                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8920         } else {
8921                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8922                                          outer_headers);
8923                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8924         }
8925         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8926                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8927         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8928                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8929                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8930         }
8931         dport = MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport);
8932         if (!vxlan_v)
8933                 return;
8934         if (!vxlan_m) {
8935                 if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
8936                     (attr->group && !priv->sh->misc5_cap))
8937                         vxlan_m = &rte_flow_item_vxlan_mask;
8938                 else
8939                         vxlan_m = &nic_mask;
8940         }
8941         if ((priv->sh->steering_format_version ==
8942             MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
8943             dport != MLX5_UDP_PORT_VXLAN) ||
8944             (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
8945             ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
8946                 void *misc_m;
8947                 void *misc_v;
8948                 char *vni_m;
8949                 char *vni_v;
8950                 int size;
8951                 int i;
8952                 misc_m = MLX5_ADDR_OF(fte_match_param,
8953                                       matcher, misc_parameters);
8954                 misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8955                 size = sizeof(vxlan_m->vni);
8956                 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8957                 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8958                 memcpy(vni_m, vxlan_m->vni, size);
8959                 for (i = 0; i < size; ++i)
8960                         vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8961                 return;
8962         }
8963         misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
8964         misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
8965         tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8966                                                    misc5_v,
8967                                                    tunnel_header_1);
8968         tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8969                                                    misc5_m,
8970                                                    tunnel_header_1);
8971         *tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
8972                            (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
8973                            (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
8974         if (*tunnel_header_v)
8975                 *tunnel_header_m = vxlan_m->vni[0] |
8976                         vxlan_m->vni[1] << 8 |
8977                         vxlan_m->vni[2] << 16;
8978         else
8979                 *tunnel_header_m = 0x0;
8980         *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
8981         if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
8982                 *tunnel_header_m |= vxlan_m->rsvd1 << 24;
8983 }
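
/*
 * A worked example of the tunnel_header_1 packing above (illustrative
 * VNI): the three VNI bytes land in the low 24 bits, rsvd1 in the top 8.
 */
#if 0
static void
example_vxlan_tunnel_header_pack(void)
{
	const uint8_t vni[3] = { 0x12, 0x34, 0x56 };
	uint32_t th = vni[0] | vni[1] << 8 | vni[2] << 16;

	MLX5_ASSERT(th == 0x563412);
}
#endif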
8984
8985 /**
8986  * Add VXLAN-GPE item to matcher and to the value.
8987  *
8988  * @param[in, out] matcher
8989  *   Flow matcher.
8990  * @param[in, out] key
8991  *   Flow matcher value.
8992  * @param[in] item
8993  *   Flow pattern to translate.
8994  * @param[in] pattern_flags
8995  *   Accumulated pattern flags.
8996  */
8998 static void
8999 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
9000                                  const struct rte_flow_item *item,
9001                                  const uint64_t pattern_flags)
9002 {
9003         static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
9004         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
9005         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
9006         /* The item was validated to be on the outer side */
9007         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9008         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9009         void *misc_m =
9010                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
9011         void *misc_v =
9012                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9013         char *vni_m =
9014                 MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
9015         char *vni_v =
9016                 MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
9017         int i, size = sizeof(vxlan_m->vni);
9018         uint8_t flags_m = 0xff;
9019         uint8_t flags_v = 0xc;
9020         uint8_t m_protocol, v_protocol;
9021
9022         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9023                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9024                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9025                          MLX5_UDP_PORT_VXLAN_GPE);
9026         }
9027         if (!vxlan_v) {
9028                 vxlan_v = &dummy_vxlan_gpe_hdr;
9029                 vxlan_m = &dummy_vxlan_gpe_hdr;
9030         } else {
9031                 if (!vxlan_m)
9032                         vxlan_m = &rte_flow_item_vxlan_gpe_mask;
9033         }
9034         memcpy(vni_m, vxlan_m->vni, size);
9035         for (i = 0; i < size; ++i)
9036                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
9037         if (vxlan_m->flags) {
9038                 flags_m = vxlan_m->flags;
9039                 flags_v = vxlan_v->flags;
9040         }
9041         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
9042         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
9043         m_protocol = vxlan_m->protocol;
9044         v_protocol = vxlan_v->protocol;
9045         if (!m_protocol) {
9046                 /* Force next protocol to ensure the next headers are parsed. */
9047                 if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
9048                         v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
9049                 else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
9050                         v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
9051                 else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
9052                         v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
9053                 if (v_protocol)
9054                         m_protocol = 0xFF;
9055         }
9056         MLX5_SET(fte_match_set_misc3, misc_m,
9057                  outer_vxlan_gpe_next_protocol, m_protocol);
9058         MLX5_SET(fte_match_set_misc3, misc_v,
9059                  outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
9060 }
9061
9062 /**
9063  * Add Geneve item to matcher and to the value.
9064  *
9065  * @param[in, out] matcher
9066  *   Flow matcher.
9067  * @param[in, out] key
9068  *   Flow matcher value.
9069  * @param[in] item
9070  *   Flow pattern to translate.
9071  * @param[in] pattern_flags
9072  *   Accumulated pattern flags.
9073  */
9075 static void
9076 flow_dv_translate_item_geneve(void *matcher, void *key,
9077                               const struct rte_flow_item *item,
9078                               uint64_t pattern_flags)
9079 {
9080         static const struct rte_flow_item_geneve empty_geneve = {0,};
9081         const struct rte_flow_item_geneve *geneve_m = item->mask;
9082         const struct rte_flow_item_geneve *geneve_v = item->spec;
9083         /* GENEVE flow item validation allows a single tunnel item. */
9084         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9085         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9086         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9087         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9088         uint16_t gbhdr_m;
9089         uint16_t gbhdr_v;
9090         char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
9091         char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
9092         size_t size = sizeof(geneve_m->vni), i;
9093         uint16_t protocol_m, protocol_v;
9094
9095         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9096                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9097                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9098                          MLX5_UDP_PORT_GENEVE);
9099         }
9100         if (!geneve_v) {
9101                 geneve_v = &empty_geneve;
9102                 geneve_m = &empty_geneve;
9103         } else {
9104                 if (!geneve_m)
9105                         geneve_m = &rte_flow_item_geneve_mask;
9106         }
9107         memcpy(vni_m, geneve_m->vni, size);
9108         for (i = 0; i < size; ++i)
9109                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
9110         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
9111         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
9112         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
9113                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9114         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
9115                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9116         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9117                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9118         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9119                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
9120                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9121         protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
9122         protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
9123         if (!protocol_m) {
9124                 /* Force next protocol to prevent matcher duplication. */
9125                 protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
9126                 if (protocol_v)
9127                         protocol_m = 0xFFFF;
9128         }
9129         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
9130         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
9131                  protocol_m & protocol_v);
9132 }
9133
9134 /**
9135  * Create Geneve TLV option resource.
9136  *
9137  * @param[in, out] dev
9138  *   Pointer to rte_eth_dev structure.
9139  * @param[in] item
9140  *   Pointer to the GENEVE TLV option flow item.
9143  * @param[out] error
9144  *   Pointer to error structure.
9145  *
9146  * @return
9147  *   0 on success, a negative errno value otherwise and rte_errno is set.
9148  */
9150 int
9151 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
9152                                              const struct rte_flow_item *item,
9153                                              struct rte_flow_error *error)
9154 {
9155         struct mlx5_priv *priv = dev->data->dev_private;
9156         struct mlx5_dev_ctx_shared *sh = priv->sh;
9157         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
9158                         sh->geneve_tlv_option_resource;
9159         struct mlx5_devx_obj *obj;
9160         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9161         int ret = 0;
9162
9163         if (!geneve_opt_v)
9164                 return -1;
9165         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
9166         if (geneve_opt_resource != NULL) {
9167                 if (geneve_opt_resource->option_class ==
9168                         geneve_opt_v->option_class &&
9169                         geneve_opt_resource->option_type ==
9170                         geneve_opt_v->option_type &&
9171                         geneve_opt_resource->length ==
9172                         geneve_opt_v->option_len) {
9173                         /* We already have GENEVE TLV option obj allocated. */
9174                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
9175                                            __ATOMIC_RELAXED);
9176                 } else {
9177                         ret = rte_flow_error_set(error, ENOMEM,
9178                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9179                                 "Only one GENEVE TLV option supported");
9180                         goto exit;
9181                 }
9182         } else {
9183                 /* Create a GENEVE TLV object and resource. */
9184                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->cdev->ctx,
9185                                 geneve_opt_v->option_class,
9186                                 geneve_opt_v->option_type,
9187                                 geneve_opt_v->option_len);
9188                 if (!obj) {
9189                         ret = rte_flow_error_set(error, ENODATA,
9190                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9191                                 "Failed to create GENEVE TLV Devx object");
9192                         goto exit;
9193                 }
9194                 sh->geneve_tlv_option_resource =
9195                                 mlx5_malloc(MLX5_MEM_ZERO,
9196                                                 sizeof(*geneve_opt_resource),
9197                                                 0, SOCKET_ID_ANY);
9198                 if (!sh->geneve_tlv_option_resource) {
9199                         claim_zero(mlx5_devx_cmd_destroy(obj));
9200                         ret = rte_flow_error_set(error, ENOMEM,
9201                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9202                                 "GENEVE TLV object memory allocation failed");
9203                         goto exit;
9204                 }
9205                 geneve_opt_resource = sh->geneve_tlv_option_resource;
9206                 geneve_opt_resource->obj = obj;
9207                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
9208                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
9209                 geneve_opt_resource->length = geneve_opt_v->option_len;
9210                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
9211                                 __ATOMIC_RELAXED);
9212         }
9213 exit:
9214         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
9215         return ret;
9216 }
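
/*
 * A minimal sketch of the refcounted singleton pattern implemented above;
 * same_option() and create_option() are hypothetical helpers standing in
 * for the class/type/length comparison and the DevX creation in the code.
 */
#if 0
static int
example_option_register(struct mlx5_geneve_tlv_option_resource *resource,
			const struct rte_flow_item *item)
{
	if (resource != NULL) {
		/* same_option() is hypothetical: class/type/length equal. */
		if (!same_option(resource, item))
			return -ENOTSUP;	/* only one option supported */
		__atomic_fetch_add(&resource->refcnt, 1, __ATOMIC_RELAXED);
	} else {
		/* create_option() is hypothetical: DevX object creation. */
		resource = create_option(item);
		__atomic_store_n(&resource->refcnt, 1, __ATOMIC_RELAXED);
	}
	return 0;
}
#endif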
9217
9218 /**
9219  * Add Geneve TLV option item to matcher.
9220  *
9221  * @param[in, out] dev
9222  *   Pointer to rte_eth_dev structure.
9223  * @param[in, out] matcher
9224  *   Flow matcher.
9225  * @param[in, out] key
9226  *   Flow matcher value.
9227  * @param[in] item
9228  *   Flow pattern to translate.
9229  * @param[out] error
9230  *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
9231  */
9232 static int
9233 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9234                                   void *key, const struct rte_flow_item *item,
9235                                   struct rte_flow_error *error)
9236 {
9237         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9238         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9239         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9240         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9241         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9242                         misc_parameters_3);
9243         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9244         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9245         int ret = 0;
9246
9247         if (!geneve_opt_v)
9248                 return -1;
9249         if (!geneve_opt_m)
9250                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9251         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9252                                                            error);
9253         if (ret) {
9254                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9255                 return ret;
9256         }
9257         /*
9258          * Set the option length in the GENEVE header if not requested.
9259          * The GENEVE TLV option length is expressed by the option length
9260          * field in the GENEVE header, so if the application did not
9261          * request it but a GENEVE TLV option item is present, the option
9262          * length field is set implicitly.
9263          */
9264         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9265                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9266                          MLX5_GENEVE_OPTLEN_MASK);
9267                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9268                          geneve_opt_v->option_len + 1);
9269         }
9270         MLX5_SET(fte_match_set_misc, misc_m, geneve_tlv_option_0_exist, 1);
9271         MLX5_SET(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist, 1);
9272         /* Set the data. */
9273         if (geneve_opt_v->data) {
9274                 memcpy(&opt_data_key, geneve_opt_v->data,
9275                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9276                                 sizeof(opt_data_key)));
9277                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9278                                 sizeof(opt_data_key));
9279                 memcpy(&opt_data_mask, geneve_opt_m->data,
9280                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9281                                 sizeof(opt_data_mask)));
9282                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9283                                 sizeof(opt_data_mask));
9284                 MLX5_SET(fte_match_set_misc3, misc3_m,
9285                                 geneve_tlv_option_0_data,
9286                                 rte_be_to_cpu_32(opt_data_mask));
9287                 MLX5_SET(fte_match_set_misc3, misc3_v,
9288                                 geneve_tlv_option_0_data,
9289                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9290         }
9291         return ret;
9292 }
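
/*
 * Illustrative sketch (not part of the driver): a GENEVE TLV option item
 * as consumed by the translation above. Only the first data DW is copied
 * into the match (see the RTE_MIN()/MLX5_ASSERT() calls) and option_len
 * counts 4-byte words. The class/type/data values are arbitrary examples.
 */
static uint32_t example_geneve_opt_data[1] = { RTE_BE32(0xdeadbeef) };
static uint32_t example_geneve_opt_dmask[1] = { RTE_BE32(0xffffffff) };

static const struct rte_flow_item_geneve_opt example_geneve_opt_spec = {
        .option_class = RTE_BE16(0x0107),
        .option_type = 0x29,
        .option_len = 1,        /* one 4-byte data word */
        .data = example_geneve_opt_data,
};
static const struct rte_flow_item_geneve_opt example_geneve_opt_mask = {
        .option_type = 0xff,
        .data = example_geneve_opt_dmask,
};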
9293
9294 /**
9295  * Add MPLS item to matcher and to the value.
9296  *
9297  * @param[in, out] matcher
9298  *   Flow matcher.
9299  * @param[in, out] key
9300  *   Flow matcher value.
9301  * @param[in] item
9302  *   Flow pattern to translate.
9303  * @param[in] prev_layer
9304  *   The protocol layer indicated in the previous item.
9305  * @param[in] inner
9306  *   Item is inner pattern.
9307  */
9308 static void
9309 flow_dv_translate_item_mpls(void *matcher, void *key,
9310                             const struct rte_flow_item *item,
9311                             uint64_t prev_layer,
9312                             int inner)
9313 {
9314         const uint32_t *in_mpls_m = item->mask;
9315         const uint32_t *in_mpls_v = item->spec;
9316         uint32_t *out_mpls_m = NULL;
9317         uint32_t *out_mpls_v = NULL;
9318         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9319         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9320         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9321                                      misc_parameters_2);
9322         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9323         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9324         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9325
9326         switch (prev_layer) {
9327         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9328                 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9329                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
9330                                  0xffff);
9331                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9332                                  MLX5_UDP_PORT_MPLS);
9333                 }
9334                 break;
9335         case MLX5_FLOW_LAYER_GRE:
9336                 /* Fall-through. */
9337         case MLX5_FLOW_LAYER_GRE_KEY:
9338                 if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
9339                         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
9340                                  0xffff);
9341                         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9342                                  RTE_ETHER_TYPE_MPLS);
9343                 }
9344                 break;
9345         default:
9346                 break;
9347         }
9348         if (!in_mpls_v)
9349                 return;
9350         if (!in_mpls_m)
9351                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9352         switch (prev_layer) {
9353         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9354                 out_mpls_m =
9355                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9356                                                  outer_first_mpls_over_udp);
9357                 out_mpls_v =
9358                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9359                                                  outer_first_mpls_over_udp);
9360                 break;
9361         case MLX5_FLOW_LAYER_GRE:
9362                 out_mpls_m =
9363                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9364                                                  outer_first_mpls_over_gre);
9365                 out_mpls_v =
9366                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9367                                                  outer_first_mpls_over_gre);
9368                 break;
9369         default:
9370                 /* Inner MPLS not over GRE is not supported. */
9371                 if (!inner) {
9372                         out_mpls_m =
9373                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9374                                                          misc2_m,
9375                                                          outer_first_mpls);
9376                         out_mpls_v =
9377                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9378                                                          misc2_v,
9379                                                          outer_first_mpls);
9380                 }
9381                 break;
9382         }
9383         if (out_mpls_m && out_mpls_v) {
9384                 *out_mpls_m = *in_mpls_m;
9385                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9386         }
9387 }
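
/*
 * Illustrative sketch (not part of the driver): an MPLS-over-UDP pattern
 * as handled above. With the UDP destination port left unmasked, the
 * translation pins it to MLX5_UDP_PORT_MPLS implicitly. The label value
 * 0x12345 below is an arbitrary example (label_tc_s packs label/TC/S).
 */
static const struct rte_flow_item example_mpls_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        {
                .type = RTE_FLOW_ITEM_TYPE_MPLS,
                .spec = &(const struct rte_flow_item_mpls){
                        .label_tc_s = { 0x12, 0x34, 0x50 },
                },
                /* Without a mask, rte_flow_item_mpls_mask is used above. */
        },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};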
9388
9389 /**
9390  * Add metadata register item to matcher
9391  *
9392  * @param[in, out] matcher
9393  *   Flow matcher.
9394  * @param[in, out] key
9395  *   Flow matcher value.
9396  * @param[in] reg_type
9397  *   Type of device metadata register.
9398  * @param[in] data
9399  *   Register value to match.
9400  * @param[in] mask
9401  *   Register mask.
9402  */
9403 static void
9404 flow_dv_match_meta_reg(void *matcher, void *key,
9405                        enum modify_reg reg_type,
9406                        uint32_t data, uint32_t mask)
9407 {
9408         void *misc2_m =
9409                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9410         void *misc2_v =
9411                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9412         uint32_t temp;
9413
9414         data &= mask;
9415         switch (reg_type) {
9416         case REG_A:
9417                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9418                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9419                 break;
9420         case REG_B:
9421                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9422                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9423                 break;
9424         case REG_C_0:
9425                 /*
9426                  * The metadata register C0 field might be divided into
9427                  * source vport index and META item value, we should set
9428                  * this field according to specified mask, not as whole one.
9429                  */
9430                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9431                 temp |= mask;
9432                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9433                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9434                 temp &= ~mask;
9435                 temp |= data;
9436                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9437                 break;
9438         case REG_C_1:
9439                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9440                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9441                 break;
9442         case REG_C_2:
9443                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9444                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9445                 break;
9446         case REG_C_3:
9447                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9448                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9449                 break;
9450         case REG_C_4:
9451                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9452                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9453                 break;
9454         case REG_C_5:
9455                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9456                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9457                 break;
9458         case REG_C_6:
9459                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9460                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9461                 break;
9462         case REG_C_7:
9463                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9464                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9465                 break;
9466         default:
9467                 MLX5_ASSERT(false);
9468                 break;
9469         }
9470 }
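
/*
 * Worked example for the REG_C_0 read-modify-write above (illustrative
 * values). Suppose the vport match already occupies the lower half:
 *     misc2_m.metadata_reg_c_0 = 0x0000ffff, misc2_v = 0x00001234
 * and a META/MARK match arrives with mask = 0xffff0000, data = 0x00ab0000:
 *     mask:  0x0000ffff | 0xffff0000                 = 0xffffffff
 *     value: (0x00001234 & ~0xffff0000) | 0x00ab0000 = 0x00ab1234
 * so both halves of C0 keep their independent matches.
 */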
9471
9472 /**
9473  * Add MARK item to matcher
9474  *
9475  * @param[in] dev
9476  *   The device to configure through.
9477  * @param[in, out] matcher
9478  *   Flow matcher.
9479  * @param[in, out] key
9480  *   Flow matcher value.
9481  * @param[in] item
9482  *   Flow pattern to translate.
9483  */
9484 static void
9485 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9486                             void *matcher, void *key,
9487                             const struct rte_flow_item *item)
9488 {
9489         struct mlx5_priv *priv = dev->data->dev_private;
9490         const struct rte_flow_item_mark *mark;
9491         uint32_t value;
9492         uint32_t mask;
9493
9494         mark = item->mask ? (const void *)item->mask :
9495                             &rte_flow_item_mark_mask;
9496         mask = mark->id & priv->sh->dv_mark_mask;
9497         mark = (const void *)item->spec;
9498         MLX5_ASSERT(mark);
9499         value = mark->id & priv->sh->dv_mark_mask & mask;
9500         if (mask) {
9501                 enum modify_reg reg;
9502
9503                 /* Get the metadata register index for the mark. */
9504                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9505                 MLX5_ASSERT(reg > 0);
9506                 if (reg == REG_C_0) {
9507                         struct mlx5_priv *priv = dev->data->dev_private;
9508                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9509                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9510
9511                         mask &= msk_c0;
9512                         mask <<= shl_c0;
9513                         value <<= shl_c0;
9514                 }
9515                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9516         }
9517 }
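
/*
 * Illustrative sketch (not part of the driver): a MARK pattern item as
 * consumed above. Depending on the dv_xmeta_en devarg, the mark may land
 * in REG_C_0 or another REG_C; on REG_C_0 it is shifted by rte_bsf32()
 * of the regc0 mask, e.g. msk_c0 = 0xffff0000 gives shl_c0 = 16, turning
 * id 0xff into value 0x00ff0000. The id below is an arbitrary example.
 */
static const struct rte_flow_item_mark example_mark_spec = { .id = 0xff };
static const struct rte_flow_item example_mark_item = {
        .type = RTE_FLOW_ITEM_TYPE_MARK,
        .spec = &example_mark_spec,
        /* Without a mask, rte_flow_item_mark_mask is used above. */
};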
9518
9519 /**
9520  * Add META item to matcher
9521  *
9522  * @param[in] dev
9523  *   The device to configure through.
9524  * @param[in, out] matcher
9525  *   Flow matcher.
9526  * @param[in, out] key
9527  *   Flow matcher value.
9528  * @param[in] attr
9529  *   Attributes of flow that includes this item.
9530  * @param[in] item
9531  *   Flow pattern to translate.
9532  */
9533 static void
9534 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9535                             void *matcher, void *key,
9536                             const struct rte_flow_attr *attr,
9537                             const struct rte_flow_item *item)
9538 {
9539         const struct rte_flow_item_meta *meta_m;
9540         const struct rte_flow_item_meta *meta_v;
9541
9542         meta_m = (const void *)item->mask;
9543         if (!meta_m)
9544                 meta_m = &rte_flow_item_meta_mask;
9545         meta_v = (const void *)item->spec;
9546         if (meta_v) {
9547                 int reg;
9548                 uint32_t value = meta_v->data;
9549                 uint32_t mask = meta_m->data;
9550
9551                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9552                 if (reg < 0)
9553                         return;
9554                 MLX5_ASSERT(reg != REG_NON);
9555                 if (reg == REG_C_0) {
9556                         struct mlx5_priv *priv = dev->data->dev_private;
9557                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9558                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9559
9560                         mask &= msk_c0;
9561                         mask <<= shl_c0;
9562                         value <<= shl_c0;
9563                 }
9564                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9565         }
9566 }
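
/*
 * Illustrative sketch (not part of the driver): a META pattern item,
 * typically matching metadata written earlier by an
 * RTE_FLOW_ACTION_TYPE_SET_META action or via the Tx metadata dynamic
 * field. The data/mask values below are arbitrary examples.
 */
static const struct rte_flow_item_meta example_meta_spec = { .data = 0xcafe };
static const struct rte_flow_item_meta example_meta_mask = { .data = 0xffff };
static const struct rte_flow_item example_meta_item = {
        .type = RTE_FLOW_ITEM_TYPE_META,
        .spec = &example_meta_spec,
        .mask = &example_meta_mask,
};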
9567
9568 /**
9569  * Add vport metadata Reg C0 item to matcher
9570  *
9571  * @param[in, out] matcher
9572  *   Flow matcher.
9573  * @param[in, out] key
9574  *   Flow matcher value.
9575  * @param[in] value   Register value to match.
9576  * @param[in] mask    Register mask.
9577  */
9578 static void
9579 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9580                                   uint32_t value, uint32_t mask)
9581 {
9582         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9583 }
9584
9585 /**
9586  * Add tag item to matcher
9587  *
9588  * @param[in] dev
9589  *   The device to configure through.
9590  * @param[in, out] matcher
9591  *   Flow matcher.
9592  * @param[in, out] key
9593  *   Flow matcher value.
9594  * @param[in] item
9595  *   Flow pattern to translate.
9596  */
9597 static void
9598 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9599                                 void *matcher, void *key,
9600                                 const struct rte_flow_item *item)
9601 {
9602         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9603         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9604         uint32_t mask, value;
9605
9606         MLX5_ASSERT(tag_v);
9607         value = tag_v->data;
9608         mask = tag_m ? tag_m->data : UINT32_MAX;
9609         if (tag_v->id == REG_C_0) {
9610                 struct mlx5_priv *priv = dev->data->dev_private;
9611                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9612                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9613
9614                 mask &= msk_c0;
9615                 mask <<= shl_c0;
9616                 value <<= shl_c0;
9617         }
9618         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9619 }
9620
9621 /**
9622  * Add TAG item to matcher
9623  *
9624  * @param[in] dev
9625  *   The device to configure through.
9626  * @param[in, out] matcher
9627  *   Flow matcher.
9628  * @param[in, out] key
9629  *   Flow matcher value.
9630  * @param[in] item
9631  *   Flow pattern to translate.
9632  */
9633 static void
9634 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9635                            void *matcher, void *key,
9636                            const struct rte_flow_item *item)
9637 {
9638         const struct rte_flow_item_tag *tag_v = item->spec;
9639         const struct rte_flow_item_tag *tag_m = item->mask;
9640         enum modify_reg reg;
9641
9642         MLX5_ASSERT(tag_v);
9643         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9644         /* Get the metadata register index for the tag. */
9645         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9646         MLX5_ASSERT(reg > 0);
9647         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9648 }
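
/*
 * Illustrative sketch (not part of the driver): a TAG item matching tag
 * array index 0; mlx5_flow_get_reg_id() maps the index onto one of the
 * REG_C_x registers at translation time. Values are arbitrary examples.
 */
static const struct rte_flow_item_tag example_tag_spec = {
        .index = 0,
        .data = 0x55,
};
static const struct rte_flow_item example_tag_item = {
        .type = RTE_FLOW_ITEM_TYPE_TAG,
        .spec = &example_tag_spec,
        /* Without a mask, rte_flow_item_tag_mask is used above. */
};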
9649
9650 /**
9651  * Add source vport match to the specified matcher.
9652  *
9653  * @param[in, out] matcher
9654  *   Flow matcher.
9655  * @param[in, out] key
9656  *   Flow matcher value.
9657  * @param[in] port
9658  *   Source vport value to match.
9659  * @param[in] mask
9660  *   Mask to apply to the vport value.
9661  */
9662 static void
9663 flow_dv_translate_item_source_vport(void *matcher, void *key,
9664                                     int16_t port, uint16_t mask)
9665 {
9666         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9667         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9668
9669         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9670         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9671 }
9672
9673 /**
9674  * Translate port-id item to an E-Switch match on port-id.
9675  *
9676  * @param[in] dev
9677  *   The device to configure through.
9678  * @param[in, out] matcher
9679  *   Flow matcher.
9680  * @param[in, out] key
9681  *   Flow matcher value.
9682  * @param[in] item
9683  *   Flow pattern to translate.
9684  * @param[in] attr
9685  *   Flow attributes.
9686  *
9687  * @return
9688  *   0 on success, a negative errno value otherwise.
9689  */
9690 static int
9691 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9692                                void *key, const struct rte_flow_item *item,
9693                                const struct rte_flow_attr *attr)
9694 {
9695         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9696         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9697         struct mlx5_priv *priv;
9698         uint16_t mask, id;
9699
9700         if (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) {
9701                 flow_dv_translate_item_source_vport(matcher, key,
9702                         flow_dv_get_esw_manager_vport_id(dev), 0xffff);
9703                 return 0;
9704         }
9705         mask = pid_m ? pid_m->id : 0xffff;
9706         id = pid_v ? pid_v->id : dev->data->port_id;
9707         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9708         if (!priv)
9709                 return -rte_errno;
9710         /*
9711          * Translate to vport field or to metadata, depending on mode.
9712          * Kernel can use either misc.source_port or half of C0 metadata
9713          * register.
9714          */
9715         if (priv->vport_meta_mask) {
9716                 /*
9717                  * Provide the hint for SW steering library
9718                  * to insert the flow into ingress domain and
9719                  * save the extra vport match.
9720                  */
9721                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9722                     priv->pf_bond < 0 && attr->transfer)
9723                         flow_dv_translate_item_source_vport
9724                                 (matcher, key, priv->vport_id, mask);
9725                 /*
9726                  * We should always set the vport metadata register,
9727                  * otherwise the SW steering library can drop
9728                  * the rule if wire vport metadata value is not zero,
9729                  * it depends on kernel configuration.
9730                  */
9731                 flow_dv_translate_item_meta_vport(matcher, key,
9732                                                   priv->vport_meta_tag,
9733                                                   priv->vport_meta_mask);
9734         } else {
9735                 flow_dv_translate_item_source_vport(matcher, key,
9736                                                     priv->vport_id, mask);
9737         }
9738         return 0;
9739 }
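
/*
 * Illustrative sketch (not part of the driver): a PORT_ID item for a
 * transfer rule. Depending on the steering mode, the translation above
 * matches either misc.source_port or the vport half of REG_C_0, and the
 * special id MLX5_PORT_ESW_MGR selects the E-Switch manager vport.
 */
static const struct rte_flow_item_port_id example_port_spec = { .id = 1 };
static const struct rte_flow_item example_port_item = {
        .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
        .spec = &example_port_spec,
        /* Without a mask, the translation above defaults to 0xffff. */
};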
9740
9741 /**
9742  * Add ICMP6 item to matcher and to the value.
9743  *
9744  * @param[in, out] matcher
9745  *   Flow matcher.
9746  * @param[in, out] key
9747  *   Flow matcher value.
9748  * @param[in] item
9749  *   Flow pattern to translate.
9750  * @param[in] inner
9751  *   Item is inner pattern.
9752  */
9753 static void
9754 flow_dv_translate_item_icmp6(void *matcher, void *key,
9755                               const struct rte_flow_item *item,
9756                               int inner)
9757 {
9758         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9759         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9760         void *headers_m;
9761         void *headers_v;
9762         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9763                                      misc_parameters_3);
9764         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9765         if (inner) {
9766                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9767                                          inner_headers);
9768                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9769         } else {
9770                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9771                                          outer_headers);
9772                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9773         }
9774         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9775         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9776         if (!icmp6_v)
9777                 return;
9778         if (!icmp6_m)
9779                 icmp6_m = &rte_flow_item_icmp6_mask;
9780         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9781         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9782                  icmp6_v->type & icmp6_m->type);
9783         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9784         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9785                  icmp6_v->code & icmp6_m->code);
9786 }
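
/*
 * Illustrative sketch (not part of the driver): matching ICMPv6 neighbor
 * solicitation (type 135). Note that the translation above pins
 * ip_protocol to IPPROTO_ICMPV6 even when the item carries no spec.
 */
static const struct rte_flow_item_icmp6 example_icmp6_spec = { .type = 135 };
static const struct rte_flow_item_icmp6 example_icmp6_mask = { .type = 0xff };
static const struct rte_flow_item example_icmp6_item = {
        .type = RTE_FLOW_ITEM_TYPE_ICMP6,
        .spec = &example_icmp6_spec,
        .mask = &example_icmp6_mask,
};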
9787
9788 /**
9789  * Add ICMP item to matcher and to the value.
9790  *
9791  * @param[in, out] matcher
9792  *   Flow matcher.
9793  * @param[in, out] key
9794  *   Flow matcher value.
9795  * @param[in] item
9796  *   Flow pattern to translate.
9797  * @param[in] inner
9798  *   Item is inner pattern.
9799  */
9800 static void
9801 flow_dv_translate_item_icmp(void *matcher, void *key,
9802                             const struct rte_flow_item *item,
9803                             int inner)
9804 {
9805         const struct rte_flow_item_icmp *icmp_m = item->mask;
9806         const struct rte_flow_item_icmp *icmp_v = item->spec;
9807         uint32_t icmp_header_data_m = 0;
9808         uint32_t icmp_header_data_v = 0;
9809         void *headers_m;
9810         void *headers_v;
9811         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9812                                      misc_parameters_3);
9813         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9814         if (inner) {
9815                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9816                                          inner_headers);
9817                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9818         } else {
9819                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9820                                          outer_headers);
9821                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9822         }
9823         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9824         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9825         if (!icmp_v)
9826                 return;
9827         if (!icmp_m)
9828                 icmp_m = &rte_flow_item_icmp_mask;
9829         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9830                  icmp_m->hdr.icmp_type);
9831         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9832                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9833         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9834                  icmp_m->hdr.icmp_code);
9835         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9836                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9837         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9838         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9839         if (icmp_header_data_m) {
9840                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9841                 icmp_header_data_v |=
9842                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9843                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9844                          icmp_header_data_m);
9845                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9846                          icmp_header_data_v & icmp_header_data_m);
9847         }
9848 }
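
/*
 * Illustrative sketch (not part of the driver): matching an ICMP echo
 * request (type 8) with a given identifier; the identifier/sequence pair
 * is folded into the single icmp_header_data field as shown above.
 */
static const struct rte_flow_item_icmp example_icmp_spec = {
        .hdr = {
                .icmp_type = 8,	/* echo request */
                .icmp_ident = RTE_BE16(0x1234),
        },
};
static const struct rte_flow_item_icmp example_icmp_mask = {
        .hdr = {
                .icmp_type = 0xff,
                .icmp_ident = RTE_BE16(0xffff),
        },
};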
9849
9850 /**
9851  * Add GTP item to matcher and to the value.
9852  *
9853  * @param[in, out] matcher
9854  *   Flow matcher.
9855  * @param[in, out] key
9856  *   Flow matcher value.
9857  * @param[in] item
9858  *   Flow pattern to translate.
9859  * @param[in] inner
9860  *   Item is inner pattern.
9861  */
9862 static void
9863 flow_dv_translate_item_gtp(void *matcher, void *key,
9864                            const struct rte_flow_item *item, int inner)
9865 {
9866         const struct rte_flow_item_gtp *gtp_m = item->mask;
9867         const struct rte_flow_item_gtp *gtp_v = item->spec;
9868         void *headers_m;
9869         void *headers_v;
9870         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9871                                      misc_parameters_3);
9872         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9873         uint16_t dport = RTE_GTPU_UDP_PORT;
9874
9875         if (inner) {
9876                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9877                                          inner_headers);
9878                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9879         } else {
9880                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9881                                          outer_headers);
9882                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9883         }
9884         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9885                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9886                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9887         }
9888         if (!gtp_v)
9889                 return;
9890         if (!gtp_m)
9891                 gtp_m = &rte_flow_item_gtp_mask;
9892         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9893                  gtp_m->v_pt_rsv_flags);
9894         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9895                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9896         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9897         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9898                  gtp_v->msg_type & gtp_m->msg_type);
9899         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9900                  rte_be_to_cpu_32(gtp_m->teid));
9901         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9902                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9903 }
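
/*
 * Illustrative sketch (not part of the driver): matching a GTP-U G-PDU
 * (message type 0xff) with an exact TEID. The translation above also
 * pins the UDP destination port to RTE_GTPU_UDP_PORT when it is left
 * unmasked. The TEID value below is an arbitrary example.
 */
static const struct rte_flow_item_gtp example_gtp_spec = {
        .msg_type = 0xff,	/* G-PDU */
        .teid = RTE_BE32(1234),
};
static const struct rte_flow_item_gtp example_gtp_mask = {
        .msg_type = 0xff,
        .teid = RTE_BE32(UINT32_MAX),
};
static const struct rte_flow_item example_gtp_item = {
        .type = RTE_FLOW_ITEM_TYPE_GTP,
        .spec = &example_gtp_spec,
        .mask = &example_gtp_mask,
};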
9904
9905 /**
9906  * Add GTP PSC item to matcher.
9907  *
9908  * @param[in, out] matcher
9909  *   Flow matcher.
9910  * @param[in, out] key
9911  *   Flow matcher value.
9912  * @param[in] item
9913  *   Flow pattern to translate.
9914  */
9915 static int
9916 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9917                                const struct rte_flow_item *item)
9918 {
9919         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9920         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9921         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9922                         misc_parameters_3);
9923         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9924         union {
9925                 uint32_t w32;
9926                 struct {
9927                         uint16_t seq_num;
9928                         uint8_t npdu_num;
9929                         uint8_t next_ext_header_type;
9930                 };
9931         } dw_2;
9932         uint8_t gtp_flags;
9933
9934         /* Always set E-flag match to one, regardless of GTP item settings. */
9935         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9936         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9937         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9938         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9939         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9940         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9941         /* Set next extension header type. */
9942         dw_2.seq_num = 0;
9943         dw_2.npdu_num = 0;
9944         dw_2.next_ext_header_type = 0xff;
9945         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9946                  rte_cpu_to_be_32(dw_2.w32));
9947         dw_2.seq_num = 0;
9948         dw_2.npdu_num = 0;
9949         dw_2.next_ext_header_type = 0x85;
9950         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9951                  rte_cpu_to_be_32(dw_2.w32));
9952         if (gtp_psc_v) {
9953                 union {
9954                         uint32_t w32;
9955                         struct {
9956                                 uint8_t len;
9957                                 uint8_t type_flags;
9958                                 uint8_t qfi;
9959                                 uint8_t reserved;
9960                         };
9961                 } dw_0;
9962
9963                 /* Set extension header PDU type and QoS. */
9964                 if (!gtp_psc_m)
9965                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9966                 dw_0.w32 = 0;
9967                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->hdr.type);
9968                 dw_0.qfi = gtp_psc_m->hdr.qfi;
9969                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9970                          rte_cpu_to_be_32(dw_0.w32));
9971                 dw_0.w32 = 0;
9972                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->hdr.type &
9973                                                         gtp_psc_m->hdr.type);
9974                 dw_0.qfi = gtp_psc_v->hdr.qfi & gtp_psc_m->hdr.qfi;
9975                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9976                          rte_cpu_to_be_32(dw_0.w32));
9977         }
9978         return 0;
9979 }
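
/*
 * Illustrative sketch (not part of the driver): a GTP PSC item matching
 * an uplink PDU session container (type 1) with QFI 9. The translation
 * above always pins the GTP E-flag and next-extension-header type 0x85.
 */
static const struct rte_flow_item_gtp_psc example_gtp_psc_spec = {
        .hdr = {
                .type = 1,	/* UL PDU session information */
                .qfi = 9,
        },
};
static const struct rte_flow_item example_gtp_psc_item = {
        .type = RTE_FLOW_ITEM_TYPE_GTP_PSC,
        .spec = &example_gtp_psc_spec,
        /* Without a mask, rte_flow_item_gtp_psc_mask is used above. */
};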
9980
9981 /**
9982  * Add eCPRI item to matcher and to the value.
9983  *
9984  * @param[in] dev
9985  *   The device to configure through.
9986  * @param[in, out] matcher
9987  *   Flow matcher.
9988  * @param[in, out] key
9989  *   Flow matcher value.
9990  * @param[in] item
9991  *   Flow pattern to translate.
9992  * @param[in] last_item
9993  *   Last item flags.
9994  */
9995 static void
9996 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9997                              void *key, const struct rte_flow_item *item,
9998                              uint64_t last_item)
9999 {
10000         struct mlx5_priv *priv = dev->data->dev_private;
10001         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
10002         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
10003         struct rte_ecpri_common_hdr common;
10004         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
10005                                      misc_parameters_4);
10006         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
10007         uint32_t *samples;
10008         void *dw_m;
10009         void *dw_v;
10010
10011         /*
10012          * In case of eCPRI over Ethernet, if EtherType is not specified,
10013          * match on eCPRI EtherType implicitly.
10014          */
10015         if (last_item & MLX5_FLOW_LAYER_OUTER_L2) {
10016                 void *hdrs_m, *hdrs_v, *l2m, *l2v;
10017
10018                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
10019                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
10020                 l2m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, ethertype);
10021                 l2v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
10022                 if (*(uint16_t *)l2m == 0 && *(uint16_t *)l2v == 0) {
10023                         *(uint16_t *)l2m = UINT16_MAX;
10024                         *(uint16_t *)l2v = RTE_BE16(RTE_ETHER_TYPE_ECPRI);
10025                 }
10026         }
10027         if (!ecpri_v)
10028                 return;
10029         if (!ecpri_m)
10030                 ecpri_m = &rte_flow_item_ecpri_mask;
10031         /*
10032          * A maximum of four DW samples is supported in a single match.
10033          * Two of them are used for eCPRI matching:
10034          * 1. Type: one byte, mask should be 0x00ff0000 in network order.
10035          * 2. ID of a message: one or two bytes, mask 0xffff0000 or
10036          *    0xff000000, if any.
10037          */
10038         if (!ecpri_m->hdr.common.u32)
10039                 return;
10040         samples = priv->sh->ecpri_parser.ids;
10041         /* Need to take the whole DW as the mask to fill the entry. */
10042         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10043                             prog_sample_field_value_0);
10044         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10045                             prog_sample_field_value_0);
10046         /* Already big endian (network order) in the header. */
10047         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
10048         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
10049         /* Sample#0, used for matching type, offset 0. */
10050         MLX5_SET(fte_match_set_misc4, misc4_m,
10051                  prog_sample_field_id_0, samples[0]);
10052         /* It makes no sense to set the sample ID in the mask field. */
10053         MLX5_SET(fte_match_set_misc4, misc4_v,
10054                  prog_sample_field_id_0, samples[0]);
10055         /*
10056          * Check whether the message body part needs to be matched.
10057          * Wildcard rules matching only the type field must be supported.
10058          */
10059         if (ecpri_m->hdr.dummy[0]) {
10060                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
10061                 switch (common.type) {
10062                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
10063                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
10064                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
10065                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10066                                             prog_sample_field_value_1);
10067                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10068                                             prog_sample_field_value_1);
10069                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
10070                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
10071                                             ecpri_m->hdr.dummy[0];
10072                         /* Sample#1, to match message body, offset 4. */
10073                         MLX5_SET(fte_match_set_misc4, misc4_m,
10074                                  prog_sample_field_id_1, samples[1]);
10075                         MLX5_SET(fte_match_set_misc4, misc4_v,
10076                                  prog_sample_field_id_1, samples[1]);
10077                         break;
10078                 default:
10079                         /* Others, do not match any sample ID. */
10080                         break;
10081                 }
10082         }
10083 }
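
/*
 * Illustrative sketch (not part of the driver): an eCPRI item matching a
 * type-2 (real-time control) message with RTC ID 0x1234. The masks follow
 * the network-order layout described above: 0x00ff0000 covers the type
 * byte and 0xffff0000 the two-byte message ID.
 */
static const struct rte_flow_item_ecpri example_ecpri_spec = {
        .hdr = {
                .common = { .u32 = RTE_BE32(0x00020000) },	/* type 2 */
                .dummy = { RTE_BE32(0x12340000) },		/* RTC ID */
        },
};
static const struct rte_flow_item_ecpri example_ecpri_mask = {
        .hdr = {
                .common = { .u32 = RTE_BE32(0x00ff0000) },
                .dummy = { RTE_BE32(0xffff0000) },
        },
};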
10084
10085 /**
10086  * Add connection tracking status item to matcher
10087  *
10088  * @param[in] dev
10089  *   The device to configure through.
10090  * @param[in, out] matcher
10091  *   Flow matcher.
10092  * @param[in, out] key
10093  *   Flow matcher value.
10094  * @param[in] item
10095  *   Flow pattern to translate.
10096  */
10097 static void
10098 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
10099                               void *matcher, void *key,
10100                               const struct rte_flow_item *item)
10101 {
10102         uint32_t reg_value = 0;
10103         int reg_id;
10104         /* The syndrome takes the 8 LSBs, 0b11/0000/11; middle 4 bits are reserved. */
10105         uint32_t reg_mask = 0;
10106         const struct rte_flow_item_conntrack *spec = item->spec;
10107         const struct rte_flow_item_conntrack *mask = item->mask;
10108         uint32_t flags;
10109         struct rte_flow_error error;
10110
10111         if (!mask)
10112                 mask = &rte_flow_item_conntrack_mask;
10113         if (!spec || !mask->flags)
10114                 return;
10115         flags = spec->flags & mask->flags;
10116         /* The conflict should be checked in the validation. */
10117         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
10118                 reg_value |= MLX5_CT_SYNDROME_VALID;
10119         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10120                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
10121         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
10122                 reg_value |= MLX5_CT_SYNDROME_INVALID;
10123         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
10124                 reg_value |= MLX5_CT_SYNDROME_TRAP;
10125         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10126                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
10127         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
10128                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
10129                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
10130                 reg_mask |= 0xc0;
10131         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10132                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
10133         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10134                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
10135         /* The REG_C_x value could be saved during startup. */
10136         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
10137         if (reg_id == REG_NON)
10138                 return;
10139         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
10140                                reg_value, reg_mask);
10141 }
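
/*
 * Illustrative sketch (not part of the driver): matching packets that the
 * ASO connection tracker reported as valid. The translation above folds
 * the flags into a syndrome match on the REG_C_x assigned to conntrack.
 */
static const struct rte_flow_item_conntrack example_ct_spec = {
        .flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
};
static const struct rte_flow_item example_ct_item = {
        .type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
        .spec = &example_ct_spec,
        /* Without a mask, rte_flow_item_conntrack_mask is used above. */
};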
10142
10143 static void
10144 flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
10145                             const struct rte_flow_item *item,
10146                             struct mlx5_flow *dev_flow, bool is_inner)
10147 {
10148         const struct rte_flow_item_flex *spec =
10149                 (const struct rte_flow_item_flex *)item->spec;
10150         int index = mlx5_flex_acquire_index(dev, spec->handle, false);
10151
10152         MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
10153         if (index < 0)
10154                 return;
10155         if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
10156                 /* Don't count both inner and outer flex items in one rule. */
10157                 if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
10158                         MLX5_ASSERT(false);
10159                 dev_flow->handle->flex_item |= RTE_BIT32(index);
10160         }
10161         mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
10162 }
10163
10164 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
10165
10166 #define HEADER_IS_ZERO(match_criteria, headers)                              \
10167         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
10168                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
10169
10170 /**
10171  * Calculate flow matcher enable bitmap.
10172  *
10173  * @param match_criteria
10174  *   Pointer to flow matcher criteria.
10175  *
10176  * @return
10177  *   Bitmap of enabled fields.
10178  */
10179 static uint8_t
10180 flow_dv_matcher_enable(uint32_t *match_criteria)
10181 {
10182         uint8_t match_criteria_enable;
10183
10184         match_criteria_enable =
10185                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
10186                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
10187         match_criteria_enable |=
10188                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
10189                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
10190         match_criteria_enable |=
10191                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
10192                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
10193         match_criteria_enable |=
10194                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
10195                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
10196         match_criteria_enable |=
10197                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
10198                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
10199         match_criteria_enable |=
10200                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
10201                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
10202         match_criteria_enable |=
10203                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10204                 MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10205         return match_criteria_enable;
10206 }
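
/*
 * Worked example (assuming the PRM bit positions outer = 0, misc2 = 3):
 * a matcher whose mask touches only outer_headers and misc_parameters_2,
 * say an outer IPv4 match plus a REG_C tag match, yields
 *     (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *     (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT) = 0x01 | 0x08 = 0x09.
 */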
10207
10208 static void
10209 __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10210 {
10211         /*
10212          * Check the flow matching criteria first, and subtract the misc5/4
10213          * length if the flow doesn't own misc5/4 parameters. In some old
10214          * rdma-core releases, misc5/4 are not supported, and matcher creation
10215          * is expected to fail without the subtraction. If misc5 is provided,
10216          * misc4 must be counted in as well, since misc5 directly follows misc4.
10217          */
10218         if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10219                 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10220                         MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10221                 if (!(match_criteria & (1 <<
10222                         MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10223                         *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10224                 }
10225         }
10226 }
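
/*
 * Worked example (assuming the usual 64-byte PRM match sets): a full
 * fte_match_param with seven sets is 448 bytes. A matcher using neither
 * misc4 nor misc5 passes 448 - 64 - 64 = 320 bytes, which rdma-core
 * releases predating misc4/5 still accept; a matcher using misc5 keeps
 * the full size since misc4 must then be counted in as well.
 */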
10227
10228 static struct mlx5_list_entry *
10229 flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
10230                          struct mlx5_list_entry *entry, void *cb_ctx)
10231 {
10232         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10233         struct mlx5_flow_dv_matcher *ref = ctx->data;
10234         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10235                                                             typeof(*tbl), tbl);
10236         struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10237                                                             sizeof(*resource),
10238                                                             0, SOCKET_ID_ANY);
10239
10240         if (!resource) {
10241                 rte_flow_error_set(ctx->error, ENOMEM,
10242                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10243                                    "cannot create matcher");
10244                 return NULL;
10245         }
10246         memcpy(resource, entry, sizeof(*resource));
10247         resource->tbl = &tbl->tbl;
10248         return &resource->entry;
10249 }
10250
10251 static void
10252 flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
10253                              struct mlx5_list_entry *entry)
10254 {
10255         mlx5_free(entry);
10256 }
10257
10258 struct mlx5_list_entry *
10259 flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
10260 {
10261         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10262         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10263         struct rte_eth_dev *dev = ctx->dev;
10264         struct mlx5_flow_tbl_data_entry *tbl_data;
10265         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
10266         struct rte_flow_error *error = ctx->error;
10267         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10268         struct mlx5_flow_tbl_resource *tbl;
10269         void *domain;
10270         uint32_t idx = 0;
10271         int ret;
10272
10273         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10274         if (!tbl_data) {
10275                 rte_flow_error_set(error, ENOMEM,
10276                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10277                                    NULL,
10278                                    "cannot allocate flow table data entry");
10279                 return NULL;
10280         }
10281         tbl_data->idx = idx;
10282         tbl_data->tunnel = tt_prm->tunnel;
10283         tbl_data->group_id = tt_prm->group_id;
10284         tbl_data->external = !!tt_prm->external;
10285         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
10286         tbl_data->is_egress = !!key.is_egress;
10287         tbl_data->is_transfer = !!key.is_fdb;
10288         tbl_data->dummy = !!key.dummy;
10289         tbl_data->level = key.level;
10290         tbl_data->id = key.id;
10291         tbl = &tbl_data->tbl;
10292         if (key.dummy)
10293                 return &tbl_data->entry;
10294         if (key.is_fdb)
10295                 domain = sh->fdb_domain;
10296         else if (key.is_egress)
10297                 domain = sh->tx_domain;
10298         else
10299                 domain = sh->rx_domain;
10300         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
10301         if (ret) {
10302                 rte_flow_error_set(error, ENOMEM,
10303                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10304                                    NULL, "cannot create flow table object");
10305                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10306                 return NULL;
10307         }
10308         if (key.level != 0) {
10309                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
10310                                         (tbl->obj, &tbl_data->jump.action);
10311                 if (ret) {
10312                         rte_flow_error_set(error, ENOMEM,
10313                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10314                                            NULL,
10315                                            "cannot create flow jump action");
10316                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10317                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10318                         return NULL;
10319                 }
10320         }
10321         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
10322               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
10323               key.level, key.id);
10324         tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
10325                                               flow_dv_matcher_create_cb,
10326                                               flow_dv_matcher_match_cb,
10327                                               flow_dv_matcher_remove_cb,
10328                                               flow_dv_matcher_clone_cb,
10329                                               flow_dv_matcher_clone_free_cb);
10330         if (!tbl_data->matchers) {
10331                 rte_flow_error_set(error, ENOMEM,
10332                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10333                                    NULL,
10334                                    "cannot create tbl matcher list");
10335                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10336                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10337                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10338                 return NULL;
10339         }
10340         return &tbl_data->entry;
10341 }
10342
10343 int
10344 flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10345                      void *cb_ctx)
10346 {
10347         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10348         struct mlx5_flow_tbl_data_entry *tbl_data =
10349                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10350         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10351
10352         return tbl_data->level != key.level ||
10353                tbl_data->id != key.id ||
10354                tbl_data->dummy != key.dummy ||
10355                tbl_data->is_transfer != !!key.is_fdb ||
10356                tbl_data->is_egress != !!key.is_egress;
10357 }
10358
10359 struct mlx5_list_entry *
10360 flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10361                       void *cb_ctx)
10362 {
10363         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10364         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10365         struct mlx5_flow_tbl_data_entry *tbl_data;
10366         struct rte_flow_error *error = ctx->error;
10367         uint32_t idx = 0;
10368
10369         tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10370         if (!tbl_data) {
10371                 rte_flow_error_set(error, ENOMEM,
10372                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10373                                    NULL,
10374                                    "cannot allocate flow table data entry");
10375                 return NULL;
10376         }
10377         memcpy(tbl_data, oentry, sizeof(*tbl_data));
10378         tbl_data->idx = idx;
10379         return &tbl_data->entry;
10380 }
10381
10382 void
10383 flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10384 {
10385         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10386         struct mlx5_flow_tbl_data_entry *tbl_data =
10387                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10388
10389         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10390 }
10391
10392 /**
10393  * Get a flow table.
10394  *
10395  * @param[in, out] dev
10396  *   Pointer to rte_eth_dev structure.
10397  * @param[in] table_level
10398  *   Table level to use.
10399  * @param[in] egress
10400  *   Direction of the table.
10401  * @param[in] transfer
10402  *   E-Switch or NIC flow.
10403  * @param[in] external   External (non-PMD-internal) table flag.
10404  * @param[in] tunnel     Tunnel offload context, NULL if none.
10405  * @param[in] group_id   Tunnel offload group id.
10406  * @param[in] dummy      Dummy entry for dv API.
10405  * @param[in] table_id
10406  *   Table id to use.
10407  * @param[out] error
10408  *   Pointer to error structure.
10409  *
10410  * @return
10411  *   Returns the table resource on success, NULL in case of failure.
10412  */
10413 struct mlx5_flow_tbl_resource *
10414 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10415                          uint32_t table_level, uint8_t egress,
10416                          uint8_t transfer,
10417                          bool external,
10418                          const struct mlx5_flow_tunnel *tunnel,
10419                          uint32_t group_id, uint8_t dummy,
10420                          uint32_t table_id,
10421                          struct rte_flow_error *error)
10422 {
10423         struct mlx5_priv *priv = dev->data->dev_private;
10424         union mlx5_flow_tbl_key table_key = {
10425                 {
10426                         .level = table_level,
10427                         .id = table_id,
10428                         .reserved = 0,
10429                         .dummy = !!dummy,
10430                         .is_fdb = !!transfer,
10431                         .is_egress = !!egress,
10432                 }
10433         };
10434         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10435                 .tunnel = tunnel,
10436                 .group_id = group_id,
10437                 .external = external,
10438         };
10439         struct mlx5_flow_cb_ctx ctx = {
10440                 .dev = dev,
10441                 .error = error,
10442                 .data = &table_key.v64,
10443                 .data2 = &tt_prm,
10444         };
10445         struct mlx5_list_entry *entry;
10446         struct mlx5_flow_tbl_data_entry *tbl_data;
10447
10448         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10449         if (!entry) {
10450                 rte_flow_error_set(error, ENOMEM,
10451                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10452                                    "cannot get table");
10453                 return NULL;
10454         }
10455         DRV_LOG(DEBUG, "table_level %u table_id %u "
10456                 "tunnel %u group %u registered.",
10457                 table_level, table_id,
10458                 tunnel ? tunnel->tunnel_id : 0, group_id);
10459         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10460         return &tbl_data->tbl;
10461 }
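
/*
 * Illustrative sketch (not part of the driver): acquiring (or creating)
 * an ingress NIC table and releasing it again. The hypothetical helper
 * below simply mirrors the prototype above; error handling is elided.
 */
static int
example_tbl_get_put(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct mlx5_flow_tbl_resource *tbl;

        tbl = flow_dv_tbl_resource_get(dev, 5 /* level */, 0 /* egress */,
                                       0 /* transfer */, false /* external */,
                                       NULL /* tunnel */, 0 /* group */,
                                       0 /* dummy */, 0 /* table id */, error);
        if (!tbl)
                return -rte_errno;
        /* ... register matchers and rules against the table here ... */
        return flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
}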
10462
10463 void
10464 flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10465 {
10466         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10467         struct mlx5_flow_tbl_data_entry *tbl_data =
10468                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10469
10470         MLX5_ASSERT(entry && sh);
10471         if (tbl_data->jump.action)
10472                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10473         if (tbl_data->tbl.obj)
10474                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10475         if (tbl_data->tunnel_offload && tbl_data->external) {
10476                 struct mlx5_list_entry *he;
10477                 struct mlx5_hlist *tunnel_grp_hash;
10478                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10479                 union tunnel_tbl_key tunnel_key = {
10480                         .tunnel_id = tbl_data->tunnel ?
10481                                         tbl_data->tunnel->tunnel_id : 0,
10482                         .group = tbl_data->group_id
10483                 };
10484                 uint32_t table_level = tbl_data->level;
10485                 struct mlx5_flow_cb_ctx ctx = {
10486                         .data = (void *)&tunnel_key.val,
10487                 };
10488
10489                 tunnel_grp_hash = tbl_data->tunnel ?
10490                                         tbl_data->tunnel->groups :
10491                                         thub->groups;
10492                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
10493                 if (he)
10494                         mlx5_hlist_unregister(tunnel_grp_hash, he);
10495                 DRV_LOG(DEBUG,
10496                         "table_level %u id %u tunnel %u group %u released.",
10497                         table_level,
10498                         tbl_data->id,
10499                         tbl_data->tunnel ?
10500                         tbl_data->tunnel->tunnel_id : 0,
10501                         tbl_data->group_id);
10502         }
10503         mlx5_list_destroy(tbl_data->matchers);
10504         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10505 }
10506
10507 /**
10508  * Release a flow table.
10509  *
10510  * @param[in] sh
10511  *   Pointer to device shared structure.
10512  * @param[in] tbl
10513  *   Table resource to be released.
10514  *
10515  * @return
10516  *   Returns 0 if the table was released, 1 otherwise.
10517  */
10518 static int
10519 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10520                              struct mlx5_flow_tbl_resource *tbl)
10521 {
10522         struct mlx5_flow_tbl_data_entry *tbl_data =
10523                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10524
10525         if (!tbl)
10526                 return 0;
10527         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10528 }
10529
10530 int
10531 flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
10532                          struct mlx5_list_entry *entry, void *cb_ctx)
10533 {
10534         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10535         struct mlx5_flow_dv_matcher *ref = ctx->data;
10536         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10537                                                         entry);
10538
10539         return cur->crc != ref->crc ||
10540                cur->priority != ref->priority ||
10541                memcmp((const void *)cur->mask.buf,
10542                       (const void *)ref->mask.buf, ref->mask.size);
10543 }
10544
10545 struct mlx5_list_entry *
10546 flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
10547 {
10548         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10549         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10550         struct mlx5_flow_dv_matcher *ref = ctx->data;
10551         struct mlx5_flow_dv_matcher *resource;
10552         struct mlx5dv_flow_matcher_attr dv_attr = {
10553                 .type = IBV_FLOW_ATTR_NORMAL,
10554                 .match_mask = (void *)&ref->mask,
10555         };
10556         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10557                                                             typeof(*tbl), tbl);
10558         int ret;
10559
10560         resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
10561                                SOCKET_ID_ANY);
10562         if (!resource) {
10563                 rte_flow_error_set(ctx->error, ENOMEM,
10564                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10565                                    "cannot create matcher");
10566                 return NULL;
10567         }
10568         *resource = *ref;
10569         dv_attr.match_criteria_enable =
10570                 flow_dv_matcher_enable(resource->mask.buf);
10571         __flow_dv_adjust_buf_size(&ref->mask.size,
10572                                   dv_attr.match_criteria_enable);
10573         dv_attr.priority = ref->priority;
10574         if (tbl->is_egress)
10575                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10576         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
10577                                                tbl->tbl.obj,
10578                                                &resource->matcher_object);
10579         if (ret) {
10580                 mlx5_free(resource);
10581                 rte_flow_error_set(ctx->error, ENOMEM,
10582                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10583                                    "cannot create matcher");
10584                 return NULL;
10585         }
10586         return &resource->entry;
10587 }
10588
10589 /**
10590  * Register the flow matcher.
10591  *
10592  * @param[in, out] dev
10593  *   Pointer to rte_eth_dev structure.
10594  * @param[in, out] ref
10595  *   Pointer to the flow matcher reference.
10596  * @param[in, out] key
10597  *   Pointer to the flow table key.
10598  * @param[in, out] dev_flow
10599  *   Pointer to the dev_flow.
10600  * @param[out] error
10601  *   Pointer to the error structure.
10602  *
10603  * @return
10604  *   0 on success, a negative errno value otherwise and rte_errno is set.
10605  */
10606 static int
10607 flow_dv_matcher_register(struct rte_eth_dev *dev,
10608                          struct mlx5_flow_dv_matcher *ref,
10609                          union mlx5_flow_tbl_key *key,
10610                          struct mlx5_flow *dev_flow,
10611                          const struct mlx5_flow_tunnel *tunnel,
10612                          uint32_t group_id,
10613                          struct rte_flow_error *error)
10614 {
10615         struct mlx5_list_entry *entry;
10616         struct mlx5_flow_dv_matcher *resource;
10617         struct mlx5_flow_tbl_resource *tbl;
10618         struct mlx5_flow_tbl_data_entry *tbl_data;
10619         struct mlx5_flow_cb_ctx ctx = {
10620                 .error = error,
10621                 .data = ref,
10622         };
10623         /*
10624          * The tunnel offload API requires this registration when a
10625          * tunnel match rule is inserted before the tunnel set rule.
10626          */
10627         tbl = flow_dv_tbl_resource_get(dev, key->level,
10628                                        key->is_egress, key->is_fdb,
10629                                        dev_flow->external, tunnel,
10630                                        group_id, 0, key->id, error);
10631         if (!tbl)
10632                 return -rte_errno;      /* No need to refill the error info */
10633         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10634         ref->tbl = tbl;
10635         entry = mlx5_list_register(tbl_data->matchers, &ctx);
10636         if (!entry) {
10637                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10638                 return rte_flow_error_set(error, ENOMEM,
10639                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10640                                           "cannot allocate ref memory");
10641         }
10642         resource = container_of(entry, typeof(*resource), entry);
10643         dev_flow->handle->dvh.matcher = resource;
10644         return 0;
10645 }
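/*
 * Note: registration takes a table reference first and only then
 * registers the matcher in the per-table matcher list; if the list
 * registration fails, the table reference taken above is dropped
 * before the error is returned.
 */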
10646
10647 struct mlx5_list_entry *
10648 flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
10649 {
10650         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10651         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10652         struct mlx5_flow_dv_tag_resource *entry;
10653         uint32_t idx = 0;
10654         int ret;
10655
10656         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10657         if (!entry) {
10658                 rte_flow_error_set(ctx->error, ENOMEM,
10659                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10660                                    "cannot allocate resource memory");
10661                 return NULL;
10662         }
10663         entry->idx = idx;
10664         entry->tag_id = *(uint32_t *)(ctx->data);
10665         ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
10666                                                   &entry->action);
10667         if (ret) {
10668                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10669                 rte_flow_error_set(ctx->error, ENOMEM,
10670                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10671                                    NULL, "cannot create action");
10672                 return NULL;
10673         }
10674         return &entry->entry;
10675 }
10676
10677 int
10678 flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10679                      void *cb_ctx)
10680 {
10681         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10682         struct mlx5_flow_dv_tag_resource *tag =
10683                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10684
10685         return *(uint32_t *)(ctx->data) != tag->tag_id;
10686 }
10687
10688 struct mlx5_list_entry *
10689 flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10690                      void *cb_ctx)
10691 {
10692         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10693         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10694         struct mlx5_flow_dv_tag_resource *entry;
10695         uint32_t idx = 0;
10696
10697         entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10698         if (!entry) {
10699                 rte_flow_error_set(ctx->error, ENOMEM,
10700                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10701                                    "cannot allocate tag resource memory");
10702                 return NULL;
10703         }
10704         memcpy(entry, oentry, sizeof(*entry));
10705         entry->idx = idx;
10706         return &entry->entry;
10707 }
10708
10709 void
10710 flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10711 {
10712         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10713         struct mlx5_flow_dv_tag_resource *tag =
10714                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10715
10716         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10717 }
10718
10719 /**
10720  * Find existing tag resource or create and register a new one.
10721  *
10722  * @param[in, out] dev
10723  *   Pointer to rte_eth_dev structure.
10724  * @param[in] tag_be24
10725  *   Tag value in big-endian, then right-shifted by 8 bits.
10726  * @param[in, out] dev_flow
10727  *   Pointer to the dev_flow.
10728  * @param[out] error
10729  *   Pointer to the error structure.
10730  *
10731  * @return
10732  *   0 on success, a negative errno value otherwise and rte_errno is set.
10733  */
10734 static int
10735 flow_dv_tag_resource_register
10736                         (struct rte_eth_dev *dev,
10737                          uint32_t tag_be24,
10738                          struct mlx5_flow *dev_flow,
10739                          struct rte_flow_error *error)
10740 {
10741         struct mlx5_priv *priv = dev->data->dev_private;
10742         struct mlx5_flow_dv_tag_resource *resource;
10743         struct mlx5_list_entry *entry;
10744         struct mlx5_flow_cb_ctx ctx = {
10745                                         .error = error,
10746                                         .data = &tag_be24,
10747                                         };
10748         struct mlx5_hlist *tag_table;
10749
10750         tag_table = flow_dv_hlist_prepare(priv->sh, &priv->sh->tag_table,
10751                                       "tags",
10752                                       MLX5_TAGS_HLIST_ARRAY_SIZE,
10753                                       false, false, priv->sh,
10754                                       flow_dv_tag_create_cb,
10755                                       flow_dv_tag_match_cb,
10756                                       flow_dv_tag_remove_cb,
10757                                       flow_dv_tag_clone_cb,
10758                                       flow_dv_tag_clone_free_cb);
10759         if (unlikely(!tag_table))
10760                 return -rte_errno;
10761         entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
10762         if (entry) {
10763                 resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
10764                                         entry);
10765                 dev_flow->handle->dvh.rix_tag = resource->idx;
10766                 dev_flow->dv.tag_resource = resource;
10767                 return 0;
10768         }
10769         return -rte_errno;
10770 }
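/*
 * Usage sketch (illustrative only): this is how MARK actions reach the
 * helper above, mirroring the sample translation code further down
 * ("mark" names the rte_flow_action_mark configuration):
 *
 *	uint32_t tag_be = mlx5_flow_mark_set(mark->id);
 *
 *	if (flow_dv_tag_resource_register(dev, tag_be, dev_flow, error))
 *		return -rte_errno;
 *	... dev_flow->dv.tag_resource->action is now usable ...
 */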
10771
10772 void
10773 flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10774 {
10775         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10776         struct mlx5_flow_dv_tag_resource *tag =
10777                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10778
10779         MLX5_ASSERT(tag && sh && tag->action);
10780         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10781         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10782         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10783 }
10784
10785 /**
10786  * Release the tag.
10787  *
10788  * @param dev
10789  *   Pointer to Ethernet device.
10790  * @param tag_idx
10791  *   Tag index.
10792  *
10793  * @return
10794  *   1 if a reference to it still exists, 0 when freed.
10795  */
10796 static int
10797 flow_dv_tag_release(struct rte_eth_dev *dev,
10798                     uint32_t tag_idx)
10799 {
10800         struct mlx5_priv *priv = dev->data->dev_private;
10801         struct mlx5_flow_dv_tag_resource *tag;
10802
10803         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10804         if (!tag)
10805                 return 0;
10806         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10807                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10808         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10809 }
10810
10811 /**
10812  * Translate action PORT_ID / REPRESENTED_PORT to vport.
10813  *
10814  * @param[in] dev
10815  *   Pointer to rte_eth_dev structure.
10816  * @param[in] action
10817  *   Pointer to action PORT_ID / REPRESENTED_PORT.
10818  * @param[out] dst_port_id
10819  *   The target port ID.
10820  * @param[out] error
10821  *   Pointer to the error structure.
10822  *
10823  * @return
10824  *   0 on success, a negative errno value otherwise and rte_errno is set.
10825  */
10826 static int
10827 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10828                                  const struct rte_flow_action *action,
10829                                  uint32_t *dst_port_id,
10830                                  struct rte_flow_error *error)
10831 {
10832         uint32_t port;
10833         struct mlx5_priv *priv;
10834
10835         switch (action->type) {
10836         case RTE_FLOW_ACTION_TYPE_PORT_ID: {
10837                 const struct rte_flow_action_port_id *conf;
10838
10839                 conf = (const struct rte_flow_action_port_id *)action->conf;
10840                 port = conf->original ? dev->data->port_id : conf->id;
10841                 break;
10842         }
10843         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
10844                 const struct rte_flow_action_ethdev *ethdev;
10845
10846                 ethdev = (const struct rte_flow_action_ethdev *)action->conf;
10847                 port = ethdev->port_id;
10848                 break;
10849         }
10850         default:
10851                 MLX5_ASSERT(false);
10852                 return rte_flow_error_set(error, EINVAL,
10853                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
10854                                           "unknown E-Switch action");
10855         }
10856
10857         priv = mlx5_port_to_eswitch_info(port, false);
10858         if (!priv)
10859                 return rte_flow_error_set(error, -rte_errno,
10860                                           RTE_FLOW_ERROR_TYPE_ACTION,
10861                                           NULL,
10862                                           "No eswitch info was found for port");
10863 #ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
10864         /*
10865          * This parameter is transferred to
10866          * mlx5dv_dr_action_create_dest_ib_port().
10867          */
10868         *dst_port_id = priv->dev_port;
10869 #else
10870         /*
10871          * Legacy mode, no LAG configuration is supported.
10872          * This parameter is transferred to
10873          * mlx5dv_dr_action_create_dest_vport().
10874          */
10875         *dst_port_id = priv->vport_id;
10876 #endif
10877         return 0;
10878 }
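/*
 * Usage sketch (illustrative only): callers resolve the destination
 * port before building the DR destination action, e.g.:
 *
 *	uint32_t dst_port;
 *
 *	if (flow_dv_translate_action_port_id(dev, action, &dst_port, error))
 *		return -rte_errno;
 *	... dst_port feeds mlx5dv_dr_action_create_dest_ib_port()/_vport() ...
 */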
10879
10880 /**
10881  * Create a counter with aging configuration.
10882  *
10883  * @param[in] dev
10884  *   Pointer to rte_eth_dev structure.
10885  * @param[in] dev_flow
10886  *   Pointer to the mlx5_flow.
10887  * @param[out] count
10888  *   Pointer to the counter action configuration.
10889  * @param[in] age
10890  *   Pointer to the aging action configuration.
10891  *
10892  * @return
10893  *   Index to flow counter on success, 0 otherwise.
10894  */
10895 static uint32_t
10896 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10897                                 struct mlx5_flow *dev_flow,
10898                                 const struct rte_flow_action_count *count
10899                                         __rte_unused,
10900                                 const struct rte_flow_action_age *age)
10901 {
10902         uint32_t counter;
10903         struct mlx5_age_param *age_param;
10904
10905         counter = flow_dv_counter_alloc(dev, !!age);
10906         if (!counter || age == NULL)
10907                 return counter;
10908         age_param = flow_dv_counter_idx_get_age(dev, counter);
10909         age_param->context = age->context ? age->context :
10910                 (void *)(uintptr_t)(dev_flow->flow_idx);
10911         age_param->timeout = age->timeout;
10912         age_param->port_id = dev->data->port_id;
10913         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10914         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10915         return counter;
10916 }
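/*
 * Usage sketch (illustrative only): a plain COUNT action passes a NULL
 * aging configuration, so only the counter is allocated ("count_conf"
 * is an assumed name for the rte_flow_action_count configuration):
 *
 *	flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
 *							 count_conf, NULL);
 *	if (!flow->counter)
 *		... report the allocation failure ...
 */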
10917
10918 /**
10919  * Add Tx queue matcher.
10920  *
10921  * @param[in] dev
10922  *   Pointer to the dev struct.
10923  * @param[in, out] matcher
10924  *   Flow matcher.
10925  * @param[in, out] key
10926  *   Flow matcher value.
10927  * @param[in] item
10928  *   Flow pattern to translate.
10931  */
10932 static void
10933 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10934                                 void *matcher, void *key,
10935                                 const struct rte_flow_item *item)
10936 {
10937         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10938         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10939         void *misc_m =
10940                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10941         void *misc_v =
10942                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10943         struct mlx5_txq_ctrl *txq;
10944         uint32_t queue, mask;
10945
10946         queue_m = (const void *)item->mask;
10947         queue_v = (const void *)item->spec;
10948         if (!queue_v)
10949                 return;
10950         txq = mlx5_txq_get(dev, queue_v->queue);
10951         if (!txq)
10952                 return;
10953         if (txq->type == MLX5_TXQ_TYPE_HAIRPIN)
10954                 queue = txq->obj->sq->id;
10955         else
10956                 queue = txq->obj->sq_obj.sq->id;
10957         mask = queue_m == NULL ? UINT32_MAX : queue_m->queue;
10958         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, mask);
10959         MLX5_SET(fte_match_set_misc, misc_v, source_sqn, queue & mask);
10960         mlx5_txq_release(dev, queue_v->queue);
10961 }
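/*
 * Note: the item carries a Tx queue index, but the hardware matches on
 * the send queue number, so the translation above resolves the queue to
 * its SQ id (hairpin or regular) and writes it into the source_sqn
 * match field.
 */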
10962
10963 /**
10964  * Set the hash fields according to the @p flow information.
10965  *
10966  * @param[in] dev_flow
10967  *   Pointer to the mlx5_flow.
10968  * @param[in] rss_desc
10969  *   Pointer to the mlx5_flow_rss_desc.
10970  */
10971 static void
10972 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10973                        struct mlx5_flow_rss_desc *rss_desc)
10974 {
10975         uint64_t items = dev_flow->handle->layers;
10976         int rss_inner = 0;
10977         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10978
10979         dev_flow->hash_fields = 0;
10980 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10981         if (rss_desc->level >= 2)
10982                 rss_inner = 1;
10983 #endif
10984         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10985             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10986                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10987                         if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
10988                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10989                         else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
10990                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10991                         else
10992                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10993                 }
10994         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10995                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10996                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10997                         if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
10998                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10999                         else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
11000                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
11001                         else
11002                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
11003                 }
11004         }
11005         if (dev_flow->hash_fields == 0)
11006                 /*
11007                  * There is no match between the RSS types and the
11008                  * L3 protocol (IPv4/IPv6) defined in the flow rule.
11009                  */
11010                 return;
11011         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
11012             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
11013                 if (rss_types & RTE_ETH_RSS_UDP) {
11014                         if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
11015                                 dev_flow->hash_fields |=
11016                                                 IBV_RX_HASH_SRC_PORT_UDP;
11017                         else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
11018                                 dev_flow->hash_fields |=
11019                                                 IBV_RX_HASH_DST_PORT_UDP;
11020                         else
11021                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
11022                 }
11023         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
11024                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
11025                 if (rss_types & RTE_ETH_RSS_TCP) {
11026                         if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
11027                                 dev_flow->hash_fields |=
11028                                                 IBV_RX_HASH_SRC_PORT_TCP;
11029                         else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
11030                                 dev_flow->hash_fields |=
11031                                                 IBV_RX_HASH_DST_PORT_TCP;
11032                         else
11033                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
11034                 }
11035         }
11036         if (rss_inner)
11037                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
11038 }
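/*
 * Worked example (illustrative only): for a rule whose outer layers are
 * IPv4/UDP, with rss_desc->types = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP and
 * level < 2, the logic above yields
 * hash_fields = MLX5_IPV4_IBV_RX_HASH | MLX5_UDP_IBV_RX_HASH,
 * i.e. hashing on both IPv4 addresses and both UDP ports, with no
 * IBV_RX_HASH_INNER flag set.
 */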
11039
11040 /**
11041  * Prepare an Rx Hash queue.
11042  *
11043  * @param dev
11044  *   Pointer to Ethernet device.
11045  * @param[in] dev_flow
11046  *   Pointer to the mlx5_flow.
11047  * @param[in] rss_desc
11048  *   Pointer to the mlx5_flow_rss_desc.
11049  * @param[out] hrxq_idx
11050  *   Hash Rx queue index.
11051  *
11052  * @return
11053  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
11054  */
11055 static struct mlx5_hrxq *
11056 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
11057                      struct mlx5_flow *dev_flow,
11058                      struct mlx5_flow_rss_desc *rss_desc,
11059                      uint32_t *hrxq_idx)
11060 {
11061         struct mlx5_priv *priv = dev->data->dev_private;
11062         struct mlx5_flow_handle *dh = dev_flow->handle;
11063         struct mlx5_hrxq *hrxq;
11064
11065         MLX5_ASSERT(rss_desc->queue_num);
11066         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
11067         rss_desc->hash_fields = dev_flow->hash_fields;
11068         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
11069         rss_desc->shared_rss = 0;
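        /* No hash fields matched the rule: fall back to a single queue. */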
11070         if (rss_desc->hash_fields == 0)
11071                 rss_desc->queue_num = 1;
11072         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
11073         if (!*hrxq_idx)
11074                 return NULL;
11075         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
11076                               *hrxq_idx);
11077         return hrxq;
11078 }
11079
11080 /**
11081  * Release sample sub action resource.
11082  *
11083  * @param[in, out] dev
11084  *   Pointer to rte_eth_dev structure.
11085  * @param[in] act_res
11086  *   Pointer to sample sub action resource.
11087  */
11088 static void
11089 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
11090                                    struct mlx5_flow_sub_actions_idx *act_res)
11091 {
11092         if (act_res->rix_hrxq) {
11093                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
11094                 act_res->rix_hrxq = 0;
11095         }
11096         if (act_res->rix_encap_decap) {
11097                 flow_dv_encap_decap_resource_release(dev,
11098                                                      act_res->rix_encap_decap);
11099                 act_res->rix_encap_decap = 0;
11100         }
11101         if (act_res->rix_port_id_action) {
11102                 flow_dv_port_id_action_resource_release(dev,
11103                                                 act_res->rix_port_id_action);
11104                 act_res->rix_port_id_action = 0;
11105         }
11106         if (act_res->rix_tag) {
11107                 flow_dv_tag_release(dev, act_res->rix_tag);
11108                 act_res->rix_tag = 0;
11109         }
11110         if (act_res->rix_jump) {
11111                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
11112                 act_res->rix_jump = 0;
11113         }
11114 }
11115
11116 int
11117 flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
11118                         struct mlx5_list_entry *entry, void *cb_ctx)
11119 {
11120         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11121         struct rte_eth_dev *dev = ctx->dev;
11122         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11123         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
11124                                                               typeof(*resource),
11125                                                               entry);
11126
11127         if (ctx_resource->ratio == resource->ratio &&
11128             ctx_resource->ft_type == resource->ft_type &&
11129             ctx_resource->ft_id == resource->ft_id &&
11130             ctx_resource->set_action == resource->set_action &&
11131             !memcmp((void *)&ctx_resource->sample_act,
11132                     (void *)&resource->sample_act,
11133                     sizeof(struct mlx5_flow_sub_actions_list))) {
11134                 /*
11135                  * An existing sample action matched: release the
11136                  * references prepared for the new sub-actions.
11137                  */
11138                 flow_dv_sample_sub_actions_release(dev,
11139                                                    &ctx_resource->sample_idx);
11140                 return 0;
11141         }
11142         return 1;
11143 }
11144
11145 struct mlx5_list_entry *
11146 flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11147 {
11148         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11149         struct rte_eth_dev *dev = ctx->dev;
11150         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11151         void **sample_dv_actions = ctx_resource->sub_actions;
11152         struct mlx5_flow_dv_sample_resource *resource;
11153         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
11154         struct mlx5_priv *priv = dev->data->dev_private;
11155         struct mlx5_dev_ctx_shared *sh = priv->sh;
11156         struct mlx5_flow_tbl_resource *tbl;
11157         uint32_t idx = 0;
11158         const uint32_t next_ft_step = 1;
11159         uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
11160         uint8_t is_egress = 0;
11161         uint8_t is_transfer = 0;
11162         struct rte_flow_error *error = ctx->error;
11163
11164         /* Register new sample resource. */
11165         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11166         if (!resource) {
11167                 rte_flow_error_set(error, ENOMEM,
11168                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11169                                           NULL,
11170                                           "cannot allocate resource memory");
11171                 return NULL;
11172         }
11173         *resource = *ctx_resource;
11174         /* Create normal path table level */
11175         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11176                 is_transfer = 1;
11177         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
11178                 is_egress = 1;
11179         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
11180                                         is_egress, is_transfer,
11181                                         true, NULL, 0, 0, 0, error);
11182         if (!tbl) {
11183                 rte_flow_error_set(error, ENOMEM,
11184                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11185                                           NULL,
11186                                           "failed to create normal path table "
11187                                           "for sample");
11188                 goto error;
11189         }
11190         resource->normal_path_tbl = tbl;
11191         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
11192                 if (!sh->default_miss_action) {
11193                         rte_flow_error_set(error, ENOMEM,
11194                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11195                                                 NULL,
11196                                                 "default miss action was not "
11197                                                 "created");
11198                         goto error;
11199                 }
11200                 sample_dv_actions[ctx_resource->sample_act.actions_num++] =
11201                                                 sh->default_miss_action;
11202         }
11203         /* Create a DR sample action */
11204         sampler_attr.sample_ratio = resource->ratio;
11205         sampler_attr.default_next_table = tbl->obj;
11206         sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
11207         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
11208                                                         &sample_dv_actions[0];
11209         sampler_attr.action = resource->set_action;
11210         if (mlx5_os_flow_dr_create_flow_action_sampler
11211                         (&sampler_attr, &resource->verbs_action)) {
11212                 rte_flow_error_set(error, ENOMEM,
11213                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11214                                         NULL, "cannot create sample action");
11215                 goto error;
11216         }
11217         resource->idx = idx;
11218         resource->dev = dev;
11219         return &resource->entry;
11220 error:
11221         if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
11222                 flow_dv_sample_sub_actions_release(dev,
11223                                                    &resource->sample_idx);
11224         if (resource->normal_path_tbl)
11225                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11226                                 resource->normal_path_tbl);
11227         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
11228         return NULL;
11230 }
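/*
 * Note: the "normal path" table is taken one level below the sampled
 * table (ft_id + 1) so that non-sampled traffic continues there; in the
 * FDB case the shared default miss action is additionally appended to
 * the sampler's action list.
 */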
11231
11232 struct mlx5_list_entry *
11233 flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
11234                          struct mlx5_list_entry *entry __rte_unused,
11235                          void *cb_ctx)
11236 {
11237         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11238         struct rte_eth_dev *dev = ctx->dev;
11239         struct mlx5_flow_dv_sample_resource *resource;
11240         struct mlx5_priv *priv = dev->data->dev_private;
11241         struct mlx5_dev_ctx_shared *sh = priv->sh;
11242         uint32_t idx = 0;
11243
11244         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11245         if (!resource) {
11246                 rte_flow_error_set(ctx->error, ENOMEM,
11247                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11248                                           NULL,
11249                                           "cannot allocate resource memory");
11250                 return NULL;
11251         }
11252         memcpy(resource, entry, sizeof(*resource));
11253         resource->idx = idx;
11254         resource->dev = dev;
11255         return &resource->entry;
11256 }
11257
11258 void
11259 flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
11260                              struct mlx5_list_entry *entry)
11261 {
11262         struct mlx5_flow_dv_sample_resource *resource =
11263                                   container_of(entry, typeof(*resource), entry);
11264         struct rte_eth_dev *dev = resource->dev;
11265         struct mlx5_priv *priv = dev->data->dev_private;
11266
11267         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
11268 }
11269
11270 /**
11271  * Find existing sample resource or create and register a new one.
11272  *
11273  * @param[in, out] dev
11274  *   Pointer to rte_eth_dev structure.
11275  * @param[in] ref
11276  *   Pointer to sample resource reference.
11277  * @param[in, out] dev_flow
11278  *   Pointer to the dev_flow.
11279  * @param[out] error
11280  *   Pointer to the error structure.
11281  *
11282  * @return
11283  *   0 on success, a negative errno value otherwise and rte_errno is set.
11284  */
11285 static int
11286 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
11287                          struct mlx5_flow_dv_sample_resource *ref,
11288                          struct mlx5_flow *dev_flow,
11289                          struct rte_flow_error *error)
11290 {
11291         struct mlx5_flow_dv_sample_resource *resource;
11292         struct mlx5_list_entry *entry;
11293         struct mlx5_priv *priv = dev->data->dev_private;
11294         struct mlx5_flow_cb_ctx ctx = {
11295                 .dev = dev,
11296                 .error = error,
11297                 .data = ref,
11298         };
11299
11300         entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
11301         if (!entry)
11302                 return -rte_errno;
11303         resource = container_of(entry, typeof(*resource), entry);
11304         dev_flow->handle->dvh.rix_sample = resource->idx;
11305         dev_flow->dv.sample_res = resource;
11306         return 0;
11307 }
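/*
 * Note: mlx5_list_register() drives the create/match/clone callbacks
 * above; when an equal entry already exists its reference count is
 * bumped and flow_dv_sample_match_cb() releases the sub-action
 * references that were prepared for the duplicate.
 */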
11308
11309 int
11310 flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
11311                             struct mlx5_list_entry *entry, void *cb_ctx)
11312 {
11313         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11314         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11315         struct rte_eth_dev *dev = ctx->dev;
11316         struct mlx5_flow_dv_dest_array_resource *resource =
11317                                   container_of(entry, typeof(*resource), entry);
11318         uint32_t idx = 0;
11319
11320         if (ctx_resource->num_of_dest == resource->num_of_dest &&
11321             ctx_resource->ft_type == resource->ft_type &&
11322             !memcmp((void *)resource->sample_act,
11323                     (void *)ctx_resource->sample_act,
11324                    (ctx_resource->num_of_dest *
11325                    sizeof(struct mlx5_flow_sub_actions_list)))) {
11326                 /*
11327                  * An existing destination array matched: release the
11328                  * references prepared for the new sub-actions.
11329                  */
11330                 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11331                         flow_dv_sample_sub_actions_release(dev,
11332                                         &ctx_resource->sample_idx[idx]);
11333                 return 0;
11334         }
11335         return 1;
11336 }
11337
11338 struct mlx5_list_entry *
11339 flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11340 {
11341         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11342         struct rte_eth_dev *dev = ctx->dev;
11343         struct mlx5_flow_dv_dest_array_resource *resource;
11344         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11345         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
11346         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
11347         struct mlx5_priv *priv = dev->data->dev_private;
11348         struct mlx5_dev_ctx_shared *sh = priv->sh;
11349         struct mlx5_flow_sub_actions_list *sample_act;
11350         struct mlx5dv_dr_domain *domain;
11351         uint32_t idx = 0, res_idx = 0;
11352         struct rte_flow_error *error = ctx->error;
11353         uint64_t action_flags;
11354         int ret;
11355
11356         /* Register new destination array resource. */
11357         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11358                                             &res_idx);
11359         if (!resource) {
11360                 rte_flow_error_set(error, ENOMEM,
11361                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11362                                           NULL,
11363                                           "cannot allocate resource memory");
11364                 return NULL;
11365         }
11366         *resource = *ctx_resource;
11367         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11368                 domain = sh->fdb_domain;
11369         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
11370                 domain = sh->rx_domain;
11371         else
11372                 domain = sh->tx_domain;
11373         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11374                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
11375                                  mlx5_malloc(MLX5_MEM_ZERO,
11376                                  sizeof(struct mlx5dv_dr_action_dest_attr),
11377                                  0, SOCKET_ID_ANY);
11378                 if (!dest_attr[idx]) {
11379                         rte_flow_error_set(error, ENOMEM,
11380                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11381                                            NULL,
11382                                            "cannot allocate resource memory");
11383                         goto error;
11384                 }
11385                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
11386                 sample_act = &ctx_resource->sample_act[idx];
11387                 action_flags = sample_act->action_flags;
11388                 switch (action_flags) {
11389                 case MLX5_FLOW_ACTION_QUEUE:
11390                         dest_attr[idx]->dest = sample_act->dr_queue_action;
11391                         break;
11392                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
11393                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
11394                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
11395                         dest_attr[idx]->dest_reformat->reformat =
11396                                         sample_act->dr_encap_action;
11397                         dest_attr[idx]->dest_reformat->dest =
11398                                         sample_act->dr_port_id_action;
11399                         break;
11400                 case MLX5_FLOW_ACTION_PORT_ID:
11401                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
11402                         break;
11403                 case MLX5_FLOW_ACTION_JUMP:
11404                         dest_attr[idx]->dest = sample_act->dr_jump_action;
11405                         break;
11406                 default:
11407                         rte_flow_error_set(error, EINVAL,
11408                                            RTE_FLOW_ERROR_TYPE_ACTION,
11409                                            NULL,
11410                                            "unsupported actions type");
11411                         goto error;
11412                 }
11413         }
11414         /* Create a destination array action. */
11415         ret = mlx5_os_flow_dr_create_flow_action_dest_array
11416                                                 (domain,
11417                                                  resource->num_of_dest,
11418                                                  dest_attr,
11419                                                  &resource->action);
11420         if (ret) {
11421                 rte_flow_error_set(error, ENOMEM,
11422                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11423                                    NULL,
11424                                    "cannot create destination array action");
11425                 goto error;
11426         }
11427         resource->idx = res_idx;
11428         resource->dev = dev;
11429         for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11430                 mlx5_free(dest_attr[idx]);
11431         return &resource->entry;
11432 error:
11433         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11434                 flow_dv_sample_sub_actions_release(dev,
11435                                                    &resource->sample_idx[idx]);
11436                 if (dest_attr[idx])
11437                         mlx5_free(dest_attr[idx]);
11438         }
11439         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
11440         return NULL;
11441 }
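/*
 * Note: the dest_attr[] array is only a temporary description for
 * mlx5_os_flow_dr_create_flow_action_dest_array(); the per-destination
 * attributes are freed again once the DR action has been created, and
 * the destination count is bounded by MLX5_MAX_DEST_NUM.
 */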
11442
11443 struct mlx5_list_entry *
11444 flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
11445                             struct mlx5_list_entry *entry __rte_unused,
11446                             void *cb_ctx)
11447 {
11448         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11449         struct rte_eth_dev *dev = ctx->dev;
11450         struct mlx5_flow_dv_dest_array_resource *resource;
11451         struct mlx5_priv *priv = dev->data->dev_private;
11452         struct mlx5_dev_ctx_shared *sh = priv->sh;
11453         uint32_t res_idx = 0;
11454         struct rte_flow_error *error = ctx->error;
11455
11456         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11457                                       &res_idx);
11458         if (!resource) {
11459                 rte_flow_error_set(error, ENOMEM,
11460                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11461                                           NULL,
11462                                           "cannot allocate dest-array memory");
11463                 return NULL;
11464         }
11465         memcpy(resource, entry, sizeof(*resource));
11466         resource->idx = res_idx;
11467         resource->dev = dev;
11468         return &resource->entry;
11469 }
11470
11471 void
11472 flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
11473                                  struct mlx5_list_entry *entry)
11474 {
11475         struct mlx5_flow_dv_dest_array_resource *resource =
11476                         container_of(entry, typeof(*resource), entry);
11477         struct rte_eth_dev *dev = resource->dev;
11478         struct mlx5_priv *priv = dev->data->dev_private;
11479
11480         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11481 }
11482
11483 /**
11484  * Find existing destination array resource or create and register a new one.
11485  *
11486  * @param[in, out] dev
11487  *   Pointer to rte_eth_dev structure.
11488  * @param[in] ref
11489  *   Pointer to destination array resource reference.
11490  * @param[in, out] dev_flow
11491  *   Pointer to the dev_flow.
11492  * @param[out] error
11493  *   Pointer to the error structure.
11494  *
11495  * @return
11496  *   0 on success, a negative errno value otherwise and rte_errno is set.
11497  */
11498 static int
11499 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11500                          struct mlx5_flow_dv_dest_array_resource *ref,
11501                          struct mlx5_flow *dev_flow,
11502                          struct rte_flow_error *error)
11503 {
11504         struct mlx5_flow_dv_dest_array_resource *resource;
11505         struct mlx5_priv *priv = dev->data->dev_private;
11506         struct mlx5_list_entry *entry;
11507         struct mlx5_flow_cb_ctx ctx = {
11508                 .dev = dev,
11509                 .error = error,
11510                 .data = ref,
11511         };
11512
11513         entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
11514         if (!entry)
11515                 return -rte_errno;
11516         resource = container_of(entry, typeof(*resource), entry);
11517         dev_flow->handle->dvh.rix_dest_array = resource->idx;
11518         dev_flow->dv.dest_array_res = resource;
11519         return 0;
11520 }
11521
11522 /**
11523  * Convert Sample action to DV specification.
11524  *
11525  * @param[in] dev
11526  *   Pointer to rte_eth_dev structure.
11527  * @param[in] action
11528  *   Pointer to sample action structure.
11529  * @param[in, out] dev_flow
11530  *   Pointer to the mlx5_flow.
11531  * @param[in] attr
11532  *   Pointer to the flow attributes.
11533  * @param[in, out] num_of_dest
11534  *   Pointer to the number of destinations.
11535  * @param[in, out] sample_actions
11536  *   Pointer to sample actions list.
11537  * @param[in, out] res
11538  *   Pointer to sample resource.
11539  * @param[out] error
11540  *   Pointer to the error structure.
11541  *
11542  * @return
11543  *   0 on success, a negative errno value otherwise and rte_errno is set.
11544  */
11545 static int
11546 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
11547                                 const struct rte_flow_action_sample *action,
11548                                 struct mlx5_flow *dev_flow,
11549                                 const struct rte_flow_attr *attr,
11550                                 uint32_t *num_of_dest,
11551                                 void **sample_actions,
11552                                 struct mlx5_flow_dv_sample_resource *res,
11553                                 struct rte_flow_error *error)
11554 {
11555         struct mlx5_priv *priv = dev->data->dev_private;
11556         const struct rte_flow_action *sub_actions;
11557         struct mlx5_flow_sub_actions_list *sample_act;
11558         struct mlx5_flow_sub_actions_idx *sample_idx;
11559         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11560         struct rte_flow *flow = dev_flow->flow;
11561         struct mlx5_flow_rss_desc *rss_desc;
11562         uint64_t action_flags = 0;
11563
11564         MLX5_ASSERT(wks);
11565         rss_desc = &wks->rss_desc;
11566         sample_act = &res->sample_act;
11567         sample_idx = &res->sample_idx;
11568         res->ratio = action->ratio;
11569         sub_actions = action->actions;
11570         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
11571                 int type = sub_actions->type;
11572                 uint32_t pre_rix = 0;
11573                 void *pre_r;
11574                 switch (type) {
11575                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11576                 {
11577                         const struct rte_flow_action_queue *queue;
11578                         struct mlx5_hrxq *hrxq;
11579                         uint32_t hrxq_idx;
11580
11581                         queue = sub_actions->conf;
11582                         rss_desc->queue_num = 1;
11583                         rss_desc->queue[0] = queue->index;
11584                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11585                                                     rss_desc, &hrxq_idx);
11586                         if (!hrxq)
11587                                 return rte_flow_error_set
11588                                         (error, rte_errno,
11589                                          RTE_FLOW_ERROR_TYPE_ACTION,
11590                                          NULL,
11591                                          "cannot create fate queue");
11592                         sample_act->dr_queue_action = hrxq->action;
11593                         sample_idx->rix_hrxq = hrxq_idx;
11594                         sample_actions[sample_act->actions_num++] =
11595                                                 hrxq->action;
11596                         (*num_of_dest)++;
11597                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11598                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11599                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11600                         dev_flow->handle->fate_action =
11601                                         MLX5_FLOW_FATE_QUEUE;
11602                         break;
11603                 }
11604                 case RTE_FLOW_ACTION_TYPE_RSS:
11605                 {
11606                         struct mlx5_hrxq *hrxq;
11607                         uint32_t hrxq_idx;
11608                         const struct rte_flow_action_rss *rss;
11609                         const uint8_t *rss_key;
11610
11611                         rss = sub_actions->conf;
11612                         memcpy(rss_desc->queue, rss->queue,
11613                                rss->queue_num * sizeof(uint16_t));
11614                         rss_desc->queue_num = rss->queue_num;
11615                         /* NULL RSS key indicates default RSS key. */
11616                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11617                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11618                         /*
11619                          * rss->level and rss->types should be set in advance
11620                          * when expanding items for RSS.
11621                          */
11622                         flow_dv_hashfields_set(dev_flow, rss_desc);
11623                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11624                                                     rss_desc, &hrxq_idx);
11625                         if (!hrxq)
11626                                 return rte_flow_error_set
11627                                         (error, rte_errno,
11628                                          RTE_FLOW_ERROR_TYPE_ACTION,
11629                                          NULL,
11630                                          "cannot create fate queue");
11631                         sample_act->dr_queue_action = hrxq->action;
11632                         sample_idx->rix_hrxq = hrxq_idx;
11633                         sample_actions[sample_act->actions_num++] =
11634                                                 hrxq->action;
11635                         (*num_of_dest)++;
11636                         action_flags |= MLX5_FLOW_ACTION_RSS;
11637                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11638                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11639                         dev_flow->handle->fate_action =
11640                                         MLX5_FLOW_FATE_QUEUE;
11641                         break;
11642                 }
11643                 case RTE_FLOW_ACTION_TYPE_MARK:
11644                 {
11645                         uint32_t tag_be = mlx5_flow_mark_set
11646                                 (((const struct rte_flow_action_mark *)
11647                                 (sub_actions->conf))->id);
11648
11649                         dev_flow->handle->mark = 1;
11650                         pre_rix = dev_flow->handle->dvh.rix_tag;
11651                         /* Save the mark resource before sample */
11652                         pre_r = dev_flow->dv.tag_resource;
11653                         if (flow_dv_tag_resource_register(dev, tag_be,
11654                                                   dev_flow, error))
11655                                 return -rte_errno;
11656                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11657                         sample_act->dr_tag_action =
11658                                 dev_flow->dv.tag_resource->action;
11659                         sample_idx->rix_tag =
11660                                 dev_flow->handle->dvh.rix_tag;
11661                         sample_actions[sample_act->actions_num++] =
11662                                                 sample_act->dr_tag_action;
11663                         /* Recover the mark resource after sample */
11664                         dev_flow->dv.tag_resource = pre_r;
11665                         dev_flow->handle->dvh.rix_tag = pre_rix;
11666                         action_flags |= MLX5_FLOW_ACTION_MARK;
11667                         break;
11668                 }
11669                 case RTE_FLOW_ACTION_TYPE_COUNT:
11670                 {
11671                         if (!flow->counter) {
11672                                 flow->counter =
11673                                         flow_dv_translate_create_counter(dev,
11674                                                 dev_flow, sub_actions->conf,
11675                                                 0);
11676                                 if (!flow->counter)
11677                                         return rte_flow_error_set
11678                                                 (error, rte_errno,
11679                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11680                                                 NULL,
11681                                                 "cannot create counter"
11682                                                 " object.");
11683                         }
11684                         sample_act->dr_cnt_action =
11685                                   (flow_dv_counter_get_by_idx(dev,
11686                                   flow->counter, NULL))->action;
11687                         sample_actions[sample_act->actions_num++] =
11688                                                 sample_act->dr_cnt_action;
11689                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11690                         break;
11691                 }
11692                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11693                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
11694                 {
11695                         struct mlx5_flow_dv_port_id_action_resource
11696                                         port_id_resource;
11697                         uint32_t port_id = 0;
11698
11699                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11700                         /* Save the port id resource before sample */
11701                         pre_rix = dev_flow->handle->rix_port_id_action;
11702                         pre_r = dev_flow->dv.port_id_action;
11703                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11704                                                              &port_id, error))
11705                                 return -rte_errno;
11706                         port_id_resource.port_id = port_id;
11707                         if (flow_dv_port_id_action_resource_register
11708                             (dev, &port_id_resource, dev_flow, error))
11709                                 return -rte_errno;
11710                         sample_act->dr_port_id_action =
11711                                 dev_flow->dv.port_id_action->action;
11712                         sample_idx->rix_port_id_action =
11713                                 dev_flow->handle->rix_port_id_action;
11714                         sample_actions[sample_act->actions_num++] =
11715                                                 sample_act->dr_port_id_action;
11716                         /* Recover the port id resource after sample */
11717                         dev_flow->dv.port_id_action = pre_r;
11718                         dev_flow->handle->rix_port_id_action = pre_rix;
11719                         (*num_of_dest)++;
11720                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11721                         break;
11722                 }
11723                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11724                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11725                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11726                         /* Save the encap resource before sample */
11727                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11728                         pre_r = dev_flow->dv.encap_decap;
11729                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11730                                                            dev_flow,
11731                                                            attr->transfer,
11732                                                            error))
11733                                 return -rte_errno;
11734                         sample_act->dr_encap_action =
11735                                 dev_flow->dv.encap_decap->action;
11736                         sample_idx->rix_encap_decap =
11737                                 dev_flow->handle->dvh.rix_encap_decap;
11738                         sample_actions[sample_act->actions_num++] =
11739                                                 sample_act->dr_encap_action;
11740                         /* Recover the encap resource after sample */
11741                         dev_flow->dv.encap_decap = pre_r;
11742                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11743                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11744                         break;
11745                 default:
11746                         return rte_flow_error_set(error, EINVAL,
11747                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11748                                 NULL,
11749                                 "unsupported action for sampler");
11750                 }
11751         }
11752         sample_act->action_flags = action_flags;
11753         res->ft_id = dev_flow->dv.group;
11754         if (attr->transfer) {
11755                 union {
11756                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11757                         uint64_t set_action;
11758                 } action_ctx = { .set_action = 0 };
11759
11760                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11761                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11762                          MLX5_MODIFICATION_TYPE_SET);
11763                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11764                          MLX5_MODI_META_REG_C_0);
11765                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11766                          priv->vport_meta_tag);
11767                 res->set_action = action_ctx.set_action;
11768         } else if (attr->ingress) {
11769                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11770         } else {
11771                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11772         }
11773         return 0;
11774 }
11775
11776 /**
11777  * Convert Sample action to DV specification.
11778  *
11779  * @param[in] dev
11780  *   Pointer to rte_eth_dev structure.
11781  * @param[in, out] dev_flow
11782  *   Pointer to the mlx5_flow.
11783  * @param[in] num_of_dest
11784  *   The number of destinations.
11785  * @param[in, out] res
11786  *   Pointer to sample resource.
11787  * @param[in, out] mdest_res
11788  *   Pointer to destination array resource.
11789  * @param[in] sample_actions
11790  *   Pointer to sample path actions list.
11791  * @param[in] action_flags
11792  *   Holds the actions detected until now.
11793  * @param[out] error
11794  *   Pointer to the error structure.
11795  *
11796  * @return
11797  *   0 on success, a negative errno value otherwise and rte_errno is set.
11798  */
11799 static int
11800 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11801                              struct mlx5_flow *dev_flow,
11802                              uint32_t num_of_dest,
11803                              struct mlx5_flow_dv_sample_resource *res,
11804                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
11805                              void **sample_actions,
11806                              uint64_t action_flags,
11807                              struct rte_flow_error *error)
11808 {
11809         /* Update the normal path action resource at the last index of the array. */
11810         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11811         struct mlx5_flow_sub_actions_list *sample_act =
11812                                         &mdest_res->sample_act[dest_index];
11813         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11814         struct mlx5_flow_rss_desc *rss_desc;
11815         uint32_t normal_idx = 0;
11816         struct mlx5_hrxq *hrxq;
11817         uint32_t hrxq_idx;
11818
11819         MLX5_ASSERT(wks);
11820         rss_desc = &wks->rss_desc;
11821         if (num_of_dest > 1) {
11822                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
11823                         /* Handle QP action for mirroring */
11824                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11825                                                     rss_desc, &hrxq_idx);
11826                         if (!hrxq)
11827                                 return rte_flow_error_set
11828                                      (error, rte_errno,
11829                                       RTE_FLOW_ERROR_TYPE_ACTION,
11830                                       NULL,
11831                                       "cannot create rx queue");
11832                         normal_idx++;
11833                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
11834                         sample_act->dr_queue_action = hrxq->action;
11835                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11836                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11837                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11838                 }
11839                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
11840                         normal_idx++;
11841                         mdest_res->sample_idx[dest_index].rix_encap_decap =
11842                                 dev_flow->handle->dvh.rix_encap_decap;
11843                         sample_act->dr_encap_action =
11844                                 dev_flow->dv.encap_decap->action;
11845                         dev_flow->handle->dvh.rix_encap_decap = 0;
11846                 }
11847                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
11848                         normal_idx++;
11849                         mdest_res->sample_idx[dest_index].rix_port_id_action =
11850                                 dev_flow->handle->rix_port_id_action;
11851                         sample_act->dr_port_id_action =
11852                                 dev_flow->dv.port_id_action->action;
11853                         dev_flow->handle->rix_port_id_action = 0;
11854                 }
11855                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
11856                         normal_idx++;
11857                         mdest_res->sample_idx[dest_index].rix_jump =
11858                                 dev_flow->handle->rix_jump;
11859                         sample_act->dr_jump_action =
11860                                 dev_flow->dv.jump->action;
11861                         dev_flow->handle->rix_jump = 0;
11862                 }
11863                 sample_act->actions_num = normal_idx;
11864                 /* Update the sample action resource at the first index of the array. */
11865                 mdest_res->ft_type = res->ft_type;
11866                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
11867                                 sizeof(struct mlx5_flow_sub_actions_idx));
11868                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
11869                                 sizeof(struct mlx5_flow_sub_actions_list));
11870                 mdest_res->num_of_dest = num_of_dest;
11871                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
11872                                                          dev_flow, error))
11873                         return rte_flow_error_set(error, EINVAL,
11874                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11875                                                   NULL, "can't create sample "
11876                                                   "action");
11877         } else {
11878                 res->sub_actions = sample_actions;
11879                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
11880                         return rte_flow_error_set(error, EINVAL,
11881                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11882                                                   NULL,
11883                                                   "can't create sample action");
11884         }
11885         return 0;
11886 }
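
/*
 * Illustrative sketch (not part of the driver; all values hypothetical)
 * of the application-side configuration consumed by the sample
 * translation above. A ratio of 1 mirrors every packet to the
 * sub-action list, which flow_dv_create_action_sample() registers as
 * either a single sample resource or a destination array:
 *
 *     struct rte_flow_action_queue mirror_queue = { .index = 0 };
 *     struct rte_flow_action sub_actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &mirror_queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_action_sample sample_conf = {
 *             .ratio = 1,
 *             .actions = sub_actions,
 *     };
 */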
11887
11888 /**
11889  * Remove an ASO age action from age actions list.
11890  *
11891  * @param[in] dev
11892  *   Pointer to the Ethernet device structure.
11893  * @param[in] age
11894  *   Pointer to the aso age action handler.
11895  */
11896 static void
11897 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11898                                 struct mlx5_aso_age_action *age)
11899 {
11900         struct mlx5_age_info *age_info;
11901         struct mlx5_age_param *age_param = &age->age_params;
11902         struct mlx5_priv *priv = dev->data->dev_private;
11903         uint16_t expected = AGE_CANDIDATE;
11904
11905         age_info = GET_PORT_AGE_INFO(priv);
11906         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11907                                          AGE_FREE, false, __ATOMIC_RELAXED,
11908                                          __ATOMIC_RELAXED)) {
11909                 /*
11910                  * We need the lock even if the action has aged out,
11911                  * since the age action may still be in process.
11912                  */
11913                 rte_spinlock_lock(&age_info->aged_sl);
11914                 LIST_REMOVE(age, next);
11915                 rte_spinlock_unlock(&age_info->aged_sl);
11916                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11917         }
11918 }
11919
11920 /**
11921  * Release an ASO age action.
11922  *
11923  * @param[in] dev
11924  *   Pointer to the Ethernet device structure.
11925  * @param[in] age_idx
11926  *   Index of ASO age action to release.
11930  *
11931  * @return
11932  *   0 when age action was removed, otherwise the number of references.
11933  */
11934 static int
11935 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11936 {
11937         struct mlx5_priv *priv = dev->data->dev_private;
11938         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11939         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11940         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11941
11942         if (!ret) {
11943                 flow_dv_aso_age_remove_from_age(dev, age);
11944                 rte_spinlock_lock(&mng->free_sl);
11945                 LIST_INSERT_HEAD(&mng->free, age, next);
11946                 rte_spinlock_unlock(&mng->free_sl);
11947         }
11948         return ret;
11949 }
11950
11951 /**
11952  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11953  *
11954  * @param[in] dev
11955  *   Pointer to the Ethernet device structure.
11956  *
11957  * @return
11958  *   0 on success, otherwise negative errno value and rte_errno is set.
11959  */
11960 static int
11961 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11962 {
11963         struct mlx5_priv *priv = dev->data->dev_private;
11964         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11965         void *old_pools = mng->pools;
11966         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11967         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11968         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11969
11970         if (!pools) {
11971                 rte_errno = ENOMEM;
11972                 return -ENOMEM;
11973         }
11974         if (old_pools) {
11975                 memcpy(pools, old_pools,
11976                        mng->n * sizeof(struct mlx5_aso_age_pool *));
11977                 mlx5_free(old_pools);
11978         } else {
11979                 /* First ASO flow hit allocation - starting ASO data-path. */
11980                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11981
11982                 if (ret) {
11983                         mlx5_free(pools);
11984                         return ret;
11985                 }
11986         }
11987         mng->n = resize;
11988         mng->pools = pools;
11989         return 0;
11990 }
11991
11992 /**
11993  * Create and initialize a new ASO aging pool.
11994  *
11995  * @param[in] dev
11996  *   Pointer to the Ethernet device structure.
11997  * @param[out] age_free
11998  *   Where to put the pointer of a new age action.
11999  *
12000  * @return
12001  *   The age actions pool pointer, with @p age_free set, on success;
12002  *   NULL otherwise and rte_errno is set.
12003  */
12004 static struct mlx5_aso_age_pool *
12005 flow_dv_age_pool_create(struct rte_eth_dev *dev,
12006                         struct mlx5_aso_age_action **age_free)
12007 {
12008         struct mlx5_priv *priv = dev->data->dev_private;
12009         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12010         struct mlx5_aso_age_pool *pool = NULL;
12011         struct mlx5_devx_obj *obj = NULL;
12012         uint32_t i;
12013
12014         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
12015                                                     priv->sh->cdev->pdn);
12016         if (!obj) {
12017                 rte_errno = ENODATA;
12018                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
12019                 return NULL;
12020         }
12021         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12022         if (!pool) {
12023                 claim_zero(mlx5_devx_cmd_destroy(obj));
12024                 rte_errno = ENOMEM;
12025                 return NULL;
12026         }
12027         pool->flow_hit_aso_obj = obj;
12028         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
12029         rte_rwlock_write_lock(&mng->resize_rwl);
12030         pool->index = mng->next;
12031         /* Resize pools array if there is no room for the new pool in it. */
12032         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
12033                 claim_zero(mlx5_devx_cmd_destroy(obj));
12034                 mlx5_free(pool);
12035                 rte_rwlock_write_unlock(&mng->resize_rwl);
12036                 return NULL;
12037         }
12038         mng->pools[pool->index] = pool;
12039         mng->next++;
12040         rte_rwlock_write_unlock(&mng->resize_rwl);
12041         /* Assign the first action in the new pool; the rest go to the free list. */
12042         *age_free = &pool->actions[0];
12043         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
12044                 pool->actions[i].offset = i;
12045                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
12046         }
12047         return pool;
12048 }
12049
12050 /**
12051  * Allocate an ASO aging bit.
12052  *
12053  * @param[in] dev
12054  *   Pointer to the Ethernet device structure.
12055  * @param[out] error
12056  *   Pointer to the error structure.
12057  *
12058  * @return
12059  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
12060  */
12061 static uint32_t
12062 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12063 {
12064         struct mlx5_priv *priv = dev->data->dev_private;
12065         const struct mlx5_aso_age_pool *pool;
12066         struct mlx5_aso_age_action *age_free = NULL;
12067         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12068
12069         MLX5_ASSERT(mng);
12070         /* Try to get the next free age action bit. */
12071         rte_spinlock_lock(&mng->free_sl);
12072         age_free = LIST_FIRST(&mng->free);
12073         if (age_free) {
12074                 LIST_REMOVE(age_free, next);
12075         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
12076                 rte_spinlock_unlock(&mng->free_sl);
12077                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12078                                    NULL, "failed to create ASO age pool");
12079                 return 0; /* 0 is an error. */
12080         }
12081         rte_spinlock_unlock(&mng->free_sl);
12082         pool = container_of
12083           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
12084                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
12085                                                                        actions);
12086         if (!age_free->dr_action) {
12087                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
12088                                                  error);
12089
12090                 if (reg_c < 0) {
12091                         /* Return the action to the free list on failure. */
12092                         rte_spinlock_lock(&mng->free_sl);
12093                         LIST_INSERT_HEAD(&mng->free, age_free, next);
12094                         rte_spinlock_unlock(&mng->free_sl);
12095                         rte_flow_error_set(error, rte_errno,
12096                                            RTE_FLOW_ERROR_TYPE_ACTION,
12097                                            NULL, "failed to get reg_c "
12098                                            "for ASO flow hit");
12099                         return 0; /* 0 is an error. */
12100                 }
12097 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
12098                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
12099                                 (priv->sh->rx_domain,
12100                                  pool->flow_hit_aso_obj->obj, age_free->offset,
12101                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
12102                                  (reg_c - REG_C_0));
12103 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
12104                 if (!age_free->dr_action) {
12105                         rte_errno = errno;
12106                         rte_spinlock_lock(&mng->free_sl);
12107                         LIST_INSERT_HEAD(&mng->free, age_free, next);
12108                         rte_spinlock_unlock(&mng->free_sl);
12109                         rte_flow_error_set(error, rte_errno,
12110                                            RTE_FLOW_ERROR_TYPE_ACTION,
12111                                            NULL, "failed to create ASO "
12112                                            "flow hit action");
12113                         return 0; /* 0 is an error. */
12114                 }
12115         }
12116         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
12117         return pool->index | ((age_free->offset + 1) << 16);
12118 }
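
/*
 * A minimal sketch of how the composite index returned above decomposes
 * (derived from the return expression: the pool index lives in the low
 * 16 bits and the in-pool offset, biased by 1 so that 0 stays reserved
 * for errors, in the high 16 bits):
 *
 *     uint32_t pool_idx = age_idx & 0xffff;
 *     uint32_t offset = (age_idx >> 16) - 1;
 */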
12119
12120 /**
12121  * Initialize flow ASO age parameters.
12122  *
12123  * @param[in] dev
12124  *   Pointer to rte_eth_dev structure.
12125  * @param[in] age_idx
12126  *   Index of ASO age action.
12127  * @param[in] context
12128  *   Pointer to flow counter age context.
12129  * @param[in] timeout
12130  *   Aging timeout in seconds.
12132  */
12133 static void
12134 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
12135                             uint32_t age_idx,
12136                             void *context,
12137                             uint32_t timeout)
12138 {
12139         struct mlx5_aso_age_action *aso_age;
12140
12141         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
12142         MLX5_ASSERT(aso_age);
12143         aso_age->age_params.context = context;
12144         aso_age->age_params.timeout = timeout;
12145         aso_age->age_params.port_id = dev->data->port_id;
12146         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
12147                          __ATOMIC_RELAXED);
12148         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
12149                          __ATOMIC_RELAXED);
12150 }
12151
12152 static void
12153 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
12154                                const struct rte_flow_item_integrity *value,
12155                                void *headers_m, void *headers_v)
12156 {
12157         if (mask->l4_ok) {
12158                 /* The RTE l4_ok filter aggregates the hardware l4_ok and
12159                  * l4_checksum_ok filters.
12160                  * A positive RTE l4_ok match requires a hardware match on
12161                  * both L4 hardware integrity bits.
12162                  * For a negative match, check the hardware l4_checksum_ok
12163                  * bit only, because hardware sets that bit to 0 for all
12164                  * packets with bad L4.
12165                  */
12166                 if (value->l4_ok) {
12167                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1);
12168                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1);
12169                 }
12170                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12171                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12172                          !!value->l4_ok);
12173         }
12174         if (mask->l4_csum_ok) {
12175                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12176                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12177                          value->l4_csum_ok);
12178         }
12179 }
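
/*
 * Summary of the l4_ok branch of the translation above, for reference
 * (derived directly from the code):
 *
 *     RTE match         hardware mask bits         hardware value
 *     l4_ok == 1   ->   l4_ok, l4_checksum_ok      both set to 1
 *     l4_ok == 0   ->   l4_checksum_ok only        set to 0
 */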
12180
12181 static void
12182 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
12183                                const struct rte_flow_item_integrity *value,
12184                                void *headers_m, void *headers_v, bool is_ipv4)
12185 {
12186         if (mask->l3_ok) {
12187                 /* For IPv4, the RTE l3_ok filter aggregates the hardware
12188                  * l3_ok and ipv4_csum_ok filters.
12189                  * A positive RTE l3_ok match requires a hardware match on
12190                  * both L3 hardware integrity bits.
12191                  * For a negative match, check the hardware ipv4_checksum_ok
12192                  * bit only, because hardware sets that bit to 0 for all
12193                  * packets with bad L3.
12194                  */
12195                 if (is_ipv4) {
12196                         if (value->l3_ok) {
12197                                 MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12198                                          l3_ok, 1);
12199                                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12200                                          l3_ok, 1);
12201                         }
12202                         MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12203                                  ipv4_checksum_ok, 1);
12204                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12205                                  ipv4_checksum_ok, !!value->l3_ok);
12206                 } else {
12207                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1);
12208                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
12209                                  value->l3_ok);
12210                 }
12211         }
12212         if (mask->ipv4_csum_ok) {
12213                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1);
12214                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
12215                          value->ipv4_csum_ok);
12216         }
12217 }
12218
12219 static void
12220 set_integrity_bits(void *headers_m, void *headers_v,
12221                    const struct rte_flow_item *integrity_item, bool is_l3_ip4)
12222 {
12223         const struct rte_flow_item_integrity *spec = integrity_item->spec;
12224         const struct rte_flow_item_integrity *mask = integrity_item->mask;
12225
12226         /* Integrity bits validation has already rejected a NULL spec. */
12227         MLX5_ASSERT(spec != NULL);
12228         if (!mask)
12229                 mask = &rte_flow_item_integrity_mask;
12230         flow_dv_translate_integrity_l3(mask, spec, headers_m, headers_v,
12231                                        is_l3_ip4);
12232         flow_dv_translate_integrity_l4(mask, spec, headers_m, headers_v);
12233 }
12234
12235 static void
12236 flow_dv_translate_item_integrity_post(void *matcher, void *key,
12237                                       const
12238                                       struct rte_flow_item *integrity_items[2],
12239                                       uint64_t pattern_flags)
12240 {
12241         void *headers_m, *headers_v;
12242         bool is_l3_ip4;
12243
12244         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
12245                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12246                                          inner_headers);
12247                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
12248                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) !=
12249                             0;
12250                 set_integrity_bits(headers_m, headers_v,
12251                                    integrity_items[1], is_l3_ip4);
12252         }
12253         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
12254                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12255                                          outer_headers);
12256                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
12257                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) !=
12258                             0;
12259                 set_integrity_bits(headers_m, headers_v,
12260                                    integrity_items[0], is_l3_ip4);
12261         }
12262 }
12263
12264 static void
12265 flow_dv_translate_item_integrity(const struct rte_flow_item *item,
12266                                  const struct rte_flow_item *integrity_items[2],
12267                                  uint64_t *last_item)
12268 {
12269         const struct rte_flow_item_integrity *spec = (typeof(spec))item->spec;
12270
12271         /* Integrity bits validation has already rejected a NULL spec. */
12272         MLX5_ASSERT(spec != NULL);
12273         if (spec->level > 1) {
12274                 integrity_items[1] = item;
12275                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
12276         } else {
12277                 integrity_items[0] = item;
12278                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
12279         }
12280 }
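
/*
 * Illustrative application-side usage of the integrity item handled
 * above (a sketch, not part of the driver; values are hypothetical).
 * level 0/1 selects the outer headers and level > 1 the inner ones,
 * matching the split in flow_dv_translate_item_integrity():
 *
 *     struct rte_flow_item_integrity spec = {
 *             .level = 0,
 *             .l3_ok = 1,
 *             .l4_ok = 1,
 *     };
 *     struct rte_flow_item_integrity mask = {
 *             .l3_ok = 1,
 *             .l4_ok = 1,
 *     };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
 *             .spec = &spec,
 *             .mask = &mask,
 *     };
 */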
12281
12282 /**
12283  * Prepares DV flow counter with aging configuration.
12284  * Gets it by index when exists, creates a new one when doesn't.
12285  *
12286  * @param[in] dev
12287  *   Pointer to rte_eth_dev structure.
12288  * @param[in] dev_flow
12289  *   Pointer to the mlx5_flow.
12290  * @param[in, out] flow
12291  *   Pointer to the sub flow.
12292  * @param[in] count
12293  *   Pointer to the counter action configuration.
12294  * @param[in] age
12295  *   Pointer to the aging action configuration.
12296  * @param[out] error
12297  *   Pointer to the error structure.
12298  *
12299  * @return
12300  *   Pointer to the counter, NULL otherwise.
12301  */
12302 static struct mlx5_flow_counter *
12303 flow_dv_prepare_counter(struct rte_eth_dev *dev,
12304                         struct mlx5_flow *dev_flow,
12305                         struct rte_flow *flow,
12306                         const struct rte_flow_action_count *count,
12307                         const struct rte_flow_action_age *age,
12308                         struct rte_flow_error *error)
12309 {
12310         if (!flow->counter) {
12311                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
12312                                                                  count, age);
12313                 if (!flow->counter) {
12314                         rte_flow_error_set(error, rte_errno,
12315                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12316                                            "cannot create counter object.");
12317                         return NULL;
12318                 }
12319         }
12320         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12321 }
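
/*
 * A sketch (not part of the driver; values hypothetical) of the action
 * pair that typically reaches flow_dv_prepare_counter(): a COUNT action
 * optionally combined with an AGE action whose timeout is in seconds:
 *
 *     struct rte_flow_action_age age_conf = {
 *             .timeout = 30,
 *             .context = NULL,
 *     };
 *     struct rte_flow_action_count count_conf = { 0 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count_conf },
 *             { .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age_conf },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */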
12322
12323 /**
12324  * Release an ASO CT action on its owner device.
12325  *
12326  * @param[in] dev
12327  *   Pointer to the Ethernet device structure.
12328  * @param[in] idx
12329  *   Index of ASO CT action to release.
12330  *
12331  * @return
12332  *   0 when CT action was removed, otherwise the number of references.
12333  */
12334 static inline int
12335 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
12336 {
12337         struct mlx5_priv *priv = dev->data->dev_private;
12338         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12339         uint32_t ret;
12340         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12341         enum mlx5_aso_ct_state state =
12342                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
12343
12344         /* Cannot release when CT is in the ASO SQ. */
12345         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
12346                 return -1;
12347         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
12348         if (!ret) {
12349                 if (ct->dr_action_orig) {
12350 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12351                         claim_zero(mlx5_glue->destroy_flow_action
12352                                         (ct->dr_action_orig));
12353 #endif
12354                         ct->dr_action_orig = NULL;
12355                 }
12356                 if (ct->dr_action_rply) {
12357 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12358                         claim_zero(mlx5_glue->destroy_flow_action
12359                                         (ct->dr_action_rply));
12360 #endif
12361                         ct->dr_action_rply = NULL;
12362                 }
12363                 /* Clear the state to free; not needed on the first allocation. */
12364                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
12365                 rte_spinlock_lock(&mng->ct_sl);
12366                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
12367                 rte_spinlock_unlock(&mng->ct_sl);
12368         }
12369         return (int)ret;
12370 }
12371
12372 static inline int
12373 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx,
12374                        struct rte_flow_error *error)
12375 {
12376         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12377         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12378         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12379         int ret;
12380
12381         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12382         if (dev->data->dev_started != 1)
12383                 return rte_flow_error_set(error, EAGAIN,
12384                                           RTE_FLOW_ERROR_TYPE_ACTION,
12385                                           NULL,
12386                                           "Indirect CT action cannot be destroyed when the port is stopped");
12387         ret = flow_dv_aso_ct_dev_release(owndev, idx);
12388         if (ret < 0)
12389                 return rte_flow_error_set(error, EAGAIN,
12390                                           RTE_FLOW_ERROR_TYPE_ACTION,
12391                                           NULL,
12392                                           "Current state prevents indirect CT action from being destroyed");
12393         return ret;
12394 }
12395
12396 /**
12397  * Resize the ASO CT pools array by 64 pools.
12398  *
12399  * @param[in] dev
12400  *   Pointer to the Ethernet device structure.
12401  *
12402  * @return
12403  *   0 on success, otherwise negative errno value and rte_errno is set.
12404  */
12405 static int
12406 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12407 {
12408         struct mlx5_priv *priv = dev->data->dev_private;
12409         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12410         void *old_pools = mng->pools;
12411         /* Hard-coded resize step for now; should become a macro. */
12412         uint32_t resize = mng->n + 64;
12413         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12414         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12415
12416         if (!pools) {
12417                 rte_errno = ENOMEM;
12418                 return -rte_errno;
12419         }
12420         rte_rwlock_write_lock(&mng->resize_rwl);
12421         /* The ASO SQ/QP was already initialized at startup. */
12422         if (old_pools) {
12423                 /* Realloc could be an alternative choice. */
12424                 rte_memcpy(pools, old_pools,
12425                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
12426                 mlx5_free(old_pools);
12427         }
12428         mng->n = resize;
12429         mng->pools = pools;
12430         rte_rwlock_write_unlock(&mng->resize_rwl);
12431         return 0;
12432 }
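
/*
 * A possible follow-up for the hard-coded resize step above (a sketch;
 * the macro name MLX5_ASO_CT_POOLS_RESIZE is hypothetical, mirroring
 * MLX5_CNT_CONTAINER_RESIZE used for the age pools):
 *
 *     #define MLX5_ASO_CT_POOLS_RESIZE 64
 *
 *     uint32_t resize = mng->n + MLX5_ASO_CT_POOLS_RESIZE;
 */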
12433
12434 /**
12435  * Create and initialize a new ASO CT pool.
12436  *
12437  * @param[in] dev
12438  *   Pointer to the Ethernet device structure.
12439  * @param[out] ct_free
12440  *   Where to put the pointer of a new CT action.
12441  *
12442  * @return
12443  *   The CT actions pool pointer, with @p ct_free set, on success;
12444  *   NULL otherwise and rte_errno is set.
12445  */
12446 static struct mlx5_aso_ct_pool *
12447 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
12448                        struct mlx5_aso_ct_action **ct_free)
12449 {
12450         struct mlx5_priv *priv = dev->data->dev_private;
12451         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12452         struct mlx5_aso_ct_pool *pool = NULL;
12453         struct mlx5_devx_obj *obj = NULL;
12454         uint32_t i;
12455         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
12456
12457         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
12458                                                           priv->sh->cdev->pdn,
12459                                                           log_obj_size);
12460         if (!obj) {
12461                 rte_errno = ENODATA;
12462                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
12463                 return NULL;
12464         }
12465         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12466         if (!pool) {
12467                 rte_errno = ENOMEM;
12468                 claim_zero(mlx5_devx_cmd_destroy(obj));
12469                 return NULL;
12470         }
12471         pool->devx_obj = obj;
12472         pool->index = mng->next;
12473         /* Resize pools array if there is no room for the new pool in it. */
12474         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
12475                 claim_zero(mlx5_devx_cmd_destroy(obj));
12476                 mlx5_free(pool);
12477                 return NULL;
12478         }
12479         mng->pools[pool->index] = pool;
12480         mng->next++;
12481         /* Assign the first action in the new pool; the rest go to the free list. */
12482         *ct_free = &pool->actions[0];
12483         /* The lock is held by the caller, so the list operation is safe here. */
12484         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
12485                 /* refcnt is 0 when allocating the memory. */
12486                 pool->actions[i].offset = i;
12487                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
12488         }
12489         return pool;
12490 }
12491
12492 /**
12493  * Allocate an ASO CT action from the free list.
12494  *
12495  * @param[in] dev
12496  *   Pointer to the Ethernet device structure.
12497  * @param[out] error
12498  *   Pointer to the error structure.
12499  *
12500  * @return
12501  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
12502  */
12503 static uint32_t
12504 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12505 {
12506         struct mlx5_priv *priv = dev->data->dev_private;
12507         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12508         struct mlx5_aso_ct_action *ct = NULL;
12509         struct mlx5_aso_ct_pool *pool;
12510         int reg_c;
12511         uint32_t ct_idx;
12512
12513         MLX5_ASSERT(mng);
12514         if (!priv->sh->devx) {
12515                 rte_errno = ENOTSUP;
12516                 return 0;
12517         }
12518         /* Get a free CT action; if none is available, create a new pool. */
12519         rte_spinlock_lock(&mng->ct_sl);
12520         ct = LIST_FIRST(&mng->free_cts);
12521         if (ct) {
12522                 LIST_REMOVE(ct, next);
12523         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
12524                 rte_spinlock_unlock(&mng->ct_sl);
12525                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12526                                    NULL, "failed to create ASO CT pool");
12527                 return 0;
12528         }
12529         rte_spinlock_unlock(&mng->ct_sl);
12530         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
12531         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
12532         /* 0: inactive, 1: created, 2+: used by flows. */
12533         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
12534         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
12535         if (reg_c < 0) {
12536                 flow_dv_aso_ct_dev_release(dev, ct_idx);
12537                 return 0; /* 0 is an error. */
12538         }
12535         if (!ct->dr_action_orig) {
12536 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12537                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
12538                         (priv->sh->rx_domain, pool->devx_obj->obj,
12539                          ct->offset,
12540                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
12541                          reg_c - REG_C_0);
12542 #else
12543                 RTE_SET_USED(reg_c);
12544 #endif
12545                 if (!ct->dr_action_orig) {
12546                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12547                         rte_flow_error_set(error, rte_errno,
12548                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12549                                            "failed to create ASO CT action");
12550                         return 0;
12551                 }
12552         }
12553         if (!ct->dr_action_rply) {
12554 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12555                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
12556                         (priv->sh->rx_domain, pool->devx_obj->obj,
12557                          ct->offset,
12558                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
12559                          reg_c - REG_C_0);
12560 #endif
12561                 if (!ct->dr_action_rply) {
12562                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12563                         rte_flow_error_set(error, rte_errno,
12564                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12565                                            "failed to create ASO CT action");
12566                         return 0;
12567                 }
12568         }
12569         return ct_idx;
12570 }
12571
12572 /**
12573  * Create a conntrack object with context and actions by using ASO mechanism.
12574  *
12575  * @param[in] dev
12576  *   Pointer to rte_eth_dev structure.
12577  * @param[in] pro
12578  *   Pointer to conntrack information profile.
12579  * @param[out] error
12580  *   Pointer to the error structure.
12581  *
12582  * @return
12583  *   Index to conntrack object on success, 0 otherwise.
12584  */
12585 static uint32_t
12586 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12587                                    const struct rte_flow_action_conntrack *pro,
12588                                    struct rte_flow_error *error)
12589 {
12590         struct mlx5_priv *priv = dev->data->dev_private;
12591         struct mlx5_dev_ctx_shared *sh = priv->sh;
12592         struct mlx5_aso_ct_action *ct;
12593         uint32_t idx;
12594
12595         if (!sh->ct_aso_en)
12596                 return rte_flow_error_set(error, ENOTSUP,
12597                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12598                                           "Connection is not supported");
12599         idx = flow_dv_aso_ct_alloc(dev, error);
12600         if (!idx)
12601                 return rte_flow_error_set(error, rte_errno,
12602                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12603                                           "Failed to allocate CT object");
12604         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12605         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12606                 return rte_flow_error_set(error, EBUSY,
12607                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12608                                           "Failed to update CT");
12609         ct->is_original = !!pro->is_original_dir;
12610         ct->peer = pro->peer_port;
12611         return idx;
12612 }
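
/*
 * Illustrative sketch (not part of the driver) of the conntrack profile
 * consumed above; only a few representative fields are shown and all
 * values are hypothetical:
 *
 *     struct rte_flow_action_conntrack profile = {
 *             .peer_port = 1,
 *             .is_original_dir = 1,
 *             .enable = 1,
 *             .state = RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
 *     };
 *
 *     uint32_t idx = flow_dv_translate_create_conntrack(dev, &profile,
 *                                                       &error);
 */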
12613
12614 /**
12615  * Fill the flow with DV spec, lock free
12616  * (the caller must hold the required mutex).
12617  *
12618  * @param[in] dev
12619  *   Pointer to rte_eth_dev structure.
12620  * @param[in, out] dev_flow
12621  *   Pointer to the sub flow.
12622  * @param[in] attr
12623  *   Pointer to the flow attributes.
12624  * @param[in] items
12625  *   Pointer to the list of items.
12626  * @param[in] actions
12627  *   Pointer to the list of actions.
12628  * @param[out] error
12629  *   Pointer to the error structure.
12630  *
12631  * @return
12632  *   0 on success, a negative errno value otherwise and rte_errno is set.
12633  */
12634 static int
12635 flow_dv_translate(struct rte_eth_dev *dev,
12636                   struct mlx5_flow *dev_flow,
12637                   const struct rte_flow_attr *attr,
12638                   const struct rte_flow_item items[],
12639                   const struct rte_flow_action actions[],
12640                   struct rte_flow_error *error)
12641 {
12642         struct mlx5_priv *priv = dev->data->dev_private;
12643         struct mlx5_dev_config *dev_conf = &priv->config;
12644         struct rte_flow *flow = dev_flow->flow;
12645         struct mlx5_flow_handle *handle = dev_flow->handle;
12646         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12647         struct mlx5_flow_rss_desc *rss_desc;
12648         uint64_t item_flags = 0;
12649         uint64_t last_item = 0;
12650         uint64_t action_flags = 0;
12651         struct mlx5_flow_dv_matcher matcher = {
12652                 .mask = {
12653                         .size = sizeof(matcher.mask.buf),
12654                 },
12655         };
12656         int actions_n = 0;
12657         bool actions_end = false;
12658         union {
12659                 struct mlx5_flow_dv_modify_hdr_resource res;
12660                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12661                             sizeof(struct mlx5_modification_cmd) *
12662                             (MLX5_MAX_MODIFY_NUM + 1)];
12663         } mhdr_dummy;
12664         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12665         const struct rte_flow_action_count *count = NULL;
12666         const struct rte_flow_action_age *non_shared_age = NULL;
12667         union flow_dv_attr flow_attr = { .attr = 0 };
12668         uint32_t tag_be;
12669         union mlx5_flow_tbl_key tbl_key;
12670         uint32_t modify_action_position = UINT32_MAX;
12671         void *match_mask = matcher.mask.buf;
12672         void *match_value = dev_flow->dv.value.buf;
12673         uint8_t next_protocol = 0xff;
12674         struct rte_vlan_hdr vlan = { 0 };
12675         struct mlx5_flow_dv_dest_array_resource mdest_res;
12676         struct mlx5_flow_dv_sample_resource sample_res;
12677         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12678         const struct rte_flow_action_sample *sample = NULL;
12679         struct mlx5_flow_sub_actions_list *sample_act;
12680         uint32_t sample_act_pos = UINT32_MAX;
12681         uint32_t age_act_pos = UINT32_MAX;
12682         uint32_t num_of_dest = 0;
12683         int tmp_actions_n = 0;
12684         uint32_t table;
12685         int ret = 0;
12686         const struct mlx5_flow_tunnel *tunnel = NULL;
12687         struct flow_grp_info grp_info = {
12688                 .external = !!dev_flow->external,
12689                 .transfer = !!attr->transfer,
12690                 .fdb_def_rule = !!priv->fdb_def_rule,
12691                 .skip_scale = dev_flow->skip_scale &
12692                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12693                 .std_tbl_fix = true,
12694         };
12695         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
12696         const struct rte_flow_item *tunnel_item = NULL;
12697
12698         if (!wks)
12699                 return rte_flow_error_set(error, ENOMEM,
12700                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12701                                           NULL,
12702                                           "failed to push flow workspace");
12703         rss_desc = &wks->rss_desc;
12704         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12705         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12706         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12707                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12708         /* Update the normal path action resource at the last index of the array. */
12709         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12710         if (is_tunnel_offload_active(dev)) {
12711                 if (dev_flow->tunnel) {
12712                         RTE_VERIFY(dev_flow->tof_type ==
12713                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12714                         tunnel = dev_flow->tunnel;
12715                 } else {
12716                         tunnel = mlx5_get_tof(items, actions,
12717                                               &dev_flow->tof_type);
12718                         dev_flow->tunnel = tunnel;
12719                 }
12720                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12721                                         (dev, attr, tunnel, dev_flow->tof_type);
12722         }
12725         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12726                                        &grp_info, error);
12727         if (ret)
12728                 return ret;
12729         dev_flow->dv.group = table;
12730         if (attr->transfer)
12731                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12732         /* The number of actions must be set to 0 in case of a dirty stack. */
12733         mhdr_res->actions_num = 0;
12734         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12735                 /*
12736                  * Do not add a decap action if the match rule drops the
12737                  * packet: HW rejects rules combining decap and drop.
12738                  *
12739                  * If the tunnel match rule was inserted before the matching
12740                  * tunnel set rule, the flow table used in the match rule must
12741                  * be registered; the current implementation handles that in
12742                  * flow_dv_match_register() at the end of this function.
12743                  */
12744                 bool add_decap = true;
12745                 const struct rte_flow_action *ptr = actions;
12746
12747                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12748                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12749                                 add_decap = false;
12750                                 break;
12751                         }
12752                 }
12753                 if (add_decap) {
12754                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12755                                                            attr->transfer,
12756                                                            error))
12757                                 return -rte_errno;
12758                         dev_flow->dv.actions[actions_n++] =
12759                                         dev_flow->dv.encap_decap->action;
12760                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12761                 }
12762         }
12763         for (; !actions_end; actions++) {
12764                 const struct rte_flow_action_queue *queue;
12765                 const struct rte_flow_action_rss *rss;
12766                 const struct rte_flow_action *action = actions;
12767                 const uint8_t *rss_key;
12768                 struct mlx5_flow_tbl_resource *tbl;
12769                 struct mlx5_aso_age_action *age_act;
12770                 struct mlx5_flow_counter *cnt_act;
12771                 uint32_t port_id = 0;
12772                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12773                 int action_type = actions->type;
12774                 const struct rte_flow_action *found_action = NULL;
12775                 uint32_t jump_group = 0;
12776                 uint32_t owner_idx;
12777                 struct mlx5_aso_ct_action *ct;
12778
12779                 if (!mlx5_flow_os_action_supported(action_type))
12780                         return rte_flow_error_set(error, ENOTSUP,
12781                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12782                                                   actions,
12783                                                   "action not supported");
12784                 switch (action_type) {
12785                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12786                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12787                         break;
12788                 case RTE_FLOW_ACTION_TYPE_VOID:
12789                         break;
12790                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12791                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
12792                         if (flow_dv_translate_action_port_id(dev, action,
12793                                                              &port_id, error))
12794                                 return -rte_errno;
12795                         port_id_resource.port_id = port_id;
12796                         MLX5_ASSERT(!handle->rix_port_id_action);
12797                         if (flow_dv_port_id_action_resource_register
12798                             (dev, &port_id_resource, dev_flow, error))
12799                                 return -rte_errno;
12800                         dev_flow->dv.actions[actions_n++] =
12801                                         dev_flow->dv.port_id_action->action;
12802                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12803                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12804                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12805                         num_of_dest++;
12806                         break;
12807                 case RTE_FLOW_ACTION_TYPE_FLAG:
12808                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12809                         dev_flow->handle->mark = 1;
12810                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12811                                 struct rte_flow_action_mark mark = {
12812                                         .id = MLX5_FLOW_MARK_DEFAULT,
12813                                 };
12814
12815                                 if (flow_dv_convert_action_mark(dev, &mark,
12816                                                                 mhdr_res,
12817                                                                 error))
12818                                         return -rte_errno;
12819                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12820                                 break;
12821                         }
12822                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12823                         /*
12824                          * Only one FLAG or MARK is supported per device flow
12825                          * right now. So the pointer to the tag resource must be
12826                          * zero before the register process.
12827                          */
12828                         MLX5_ASSERT(!handle->dvh.rix_tag);
12829                         if (flow_dv_tag_resource_register(dev, tag_be,
12830                                                           dev_flow, error))
12831                                 return -rte_errno;
12832                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12833                         dev_flow->dv.actions[actions_n++] =
12834                                         dev_flow->dv.tag_resource->action;
12835                         break;
12836                 case RTE_FLOW_ACTION_TYPE_MARK:
12837                         action_flags |= MLX5_FLOW_ACTION_MARK;
12838                         dev_flow->handle->mark = 1;
12839                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12840                                 const struct rte_flow_action_mark *mark =
12841                                         (const struct rte_flow_action_mark *)
12842                                                 actions->conf;
12843
12844                                 if (flow_dv_convert_action_mark(dev, mark,
12845                                                                 mhdr_res,
12846                                                                 error))
12847                                         return -rte_errno;
12848                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12849                                 break;
12850                         }
12851                         /* Fall-through */
12852                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12853                         /* Legacy (non-extensive) MARK action. */
12854                         tag_be = mlx5_flow_mark_set
12855                               (((const struct rte_flow_action_mark *)
12856                                (actions->conf))->id);
12857                         MLX5_ASSERT(!handle->dvh.rix_tag);
12858                         if (flow_dv_tag_resource_register(dev, tag_be,
12859                                                           dev_flow, error))
12860                                 return -rte_errno;
12861                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12862                         dev_flow->dv.actions[actions_n++] =
12863                                         dev_flow->dv.tag_resource->action;
12864                         break;
12865                 case RTE_FLOW_ACTION_TYPE_SET_META:
12866                         if (flow_dv_convert_action_set_meta
12867                                 (dev, mhdr_res, attr,
12868                                  (const struct rte_flow_action_set_meta *)
12869                                   actions->conf, error))
12870                                 return -rte_errno;
12871                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12872                         break;
12873                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12874                         if (flow_dv_convert_action_set_tag
12875                                 (dev, mhdr_res,
12876                                  (const struct rte_flow_action_set_tag *)
12877                                   actions->conf, error))
12878                                 return -rte_errno;
12879                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12880                         break;
12881                 case RTE_FLOW_ACTION_TYPE_DROP:
12882                         action_flags |= MLX5_FLOW_ACTION_DROP;
12883                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12884                         break;
12885                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12886                         queue = actions->conf;
12887                         rss_desc->queue_num = 1;
12888                         rss_desc->queue[0] = queue->index;
12889                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12890                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12891                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12892                         num_of_dest++;
12893                         break;
12894                 case RTE_FLOW_ACTION_TYPE_RSS:
12895                         rss = actions->conf;
12896                         memcpy(rss_desc->queue, rss->queue,
12897                                rss->queue_num * sizeof(uint16_t));
12898                         rss_desc->queue_num = rss->queue_num;
12899                         /* NULL RSS key indicates default RSS key. */
12900                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12901                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12902                         /*
12903                          * rss->level and rss->types should be set in advance
12904                          * when expanding items for RSS.
12905                          */
12906                         action_flags |= MLX5_FLOW_ACTION_RSS;
12907                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12908                                 MLX5_FLOW_FATE_SHARED_RSS :
12909                                 MLX5_FLOW_FATE_QUEUE;
12910                         break;
12911                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12912                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12913                         age_act = flow_aso_age_get_by_idx(dev, owner_idx);
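                        /* Take a reference on the shared AGE action the
                         * first time this flow attaches to it.
                         */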
12914                         if (flow->age == 0) {
12915                                 flow->age = owner_idx;
12916                                 __atomic_fetch_add(&age_act->refcnt, 1,
12917                                                    __ATOMIC_RELAXED);
12918                         }
12919                         age_act_pos = actions_n++;
12920                         action_flags |= MLX5_FLOW_ACTION_AGE;
12921                         break;
12922                 case RTE_FLOW_ACTION_TYPE_AGE:
12923                         non_shared_age = action->conf;
12924                         age_act_pos = actions_n++;
12925                         action_flags |= MLX5_FLOW_ACTION_AGE;
12926                         break;
12927                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12928                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12929                         cnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,
12930                                                              NULL);
12931                         MLX5_ASSERT(cnt_act != NULL);
12932                         /*
12933                          * When creating a meter drop flow in the drop
12934                          * table, the counter must not overwrite the
12935                          * rte_flow counter.
12936                          */
12936                         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
12937                             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
12938                                 dev_flow->dv.actions[actions_n++] =
12939                                                         cnt_act->action;
12940                         } else {
12941                                 if (flow->counter == 0) {
12942                                         flow->counter = owner_idx;
12943                                         __atomic_fetch_add
12944                                                 (&cnt_act->shared_info.refcnt,
12945                                                  1, __ATOMIC_RELAXED);
12946                                 }
12947                                 /* Save the information now; apply it when END is processed. */
12948                                 action_flags |= MLX5_FLOW_ACTION_COUNT;
12949                         }
12950                         break;
12951                 case RTE_FLOW_ACTION_TYPE_COUNT:
12952                         if (!priv->sh->devx) {
12953                                 return rte_flow_error_set
12954                                               (error, ENOTSUP,
12955                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12956                                                NULL,
12957                                                "count action not supported");
12958                         }
12959                         /* Save the information now; apply it when END is processed. */
12960                         count = action->conf;
12961                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12962                         break;
12963                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12964                         dev_flow->dv.actions[actions_n++] =
12965                                                 priv->sh->pop_vlan_action;
12966                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12967                         break;
12968                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12969                         if (!(action_flags &
12970                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12971                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12972                         vlan.eth_proto = rte_be_to_cpu_16
12973                              ((((const struct rte_flow_action_of_push_vlan *)
12974                                                    actions->conf)->ethertype));
12975                         found_action = mlx5_flow_find_action
12976                                         (actions + 1,
12977                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12978                         if (found_action)
12979                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12980                         found_action = mlx5_flow_find_action
12981                                         (actions + 1,
12982                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12983                         if (found_action)
12984                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12985                         if (flow_dv_create_action_push_vlan
12986                                             (dev, attr, &vlan, dev_flow, error))
12987                                 return -rte_errno;
12988                         dev_flow->dv.actions[actions_n++] =
12989                                         dev_flow->dv.push_vlan_res->action;
12990                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12991                         break;
12992                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12993                         /* The OF_PUSH_VLAN action has already handled this one. */
12994                         MLX5_ASSERT(action_flags &
12995                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12996                         break;
12997                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12998                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12999                                 break;
13000                         flow_dev_get_vlan_info_from_items(items, &vlan);
13001                         mlx5_update_vlan_vid_pcp(actions, &vlan);
13002                         /* Without a preceding VLAN push this is a modify header action. */
13003                         if (flow_dv_convert_action_modify_vlan_vid
13004                                                 (mhdr_res, actions, error))
13005                                 return -rte_errno;
13006                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
13007                         break;
13008                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
13009                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
13010                         if (flow_dv_create_action_l2_encap(dev, actions,
13011                                                            dev_flow,
13012                                                            attr->transfer,
13013                                                            error))
13014                                 return -rte_errno;
13015                         dev_flow->dv.actions[actions_n++] =
13016                                         dev_flow->dv.encap_decap->action;
13017                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
13018                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13019                                 sample_act->action_flags |=
13020                                                         MLX5_FLOW_ACTION_ENCAP;
13021                         break;
13022                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
13023                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
13024                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
13025                                                            attr->transfer,
13026                                                            error))
13027                                 return -rte_errno;
13028                         dev_flow->dv.actions[actions_n++] =
13029                                         dev_flow->dv.encap_decap->action;
13030                         action_flags |= MLX5_FLOW_ACTION_DECAP;
13031                         break;
13032                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
13033                         /* Handle encap with preceding decap. */
13034                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
13035                                 if (flow_dv_create_action_raw_encap
13036                                         (dev, actions, dev_flow, attr, error))
13037                                         return -rte_errno;
13038                                 dev_flow->dv.actions[actions_n++] =
13039                                         dev_flow->dv.encap_decap->action;
13040                         } else {
13041                                 /* Handle encap without preceding decap. */
13042                                 if (flow_dv_create_action_l2_encap
13043                                     (dev, actions, dev_flow, attr->transfer,
13044                                      error))
13045                                         return -rte_errno;
13046                                 dev_flow->dv.actions[actions_n++] =
13047                                         dev_flow->dv.encap_decap->action;
13048                         }
13049                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
13050                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13051                                 sample_act->action_flags |=
13052                                                         MLX5_FLOW_ACTION_ENCAP;
13053                         break;
13054                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
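                        /* Look ahead, skipping VOID actions, to see whether
                         * the decap is immediately followed by a raw encap.
                         */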
13055                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
13056                                 ;
13057                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
13058                                 if (flow_dv_create_action_l2_decap
13059                                     (dev, dev_flow, attr->transfer, error))
13060                                         return -rte_errno;
13061                                 dev_flow->dv.actions[actions_n++] =
13062                                         dev_flow->dv.encap_decap->action;
13063                         }
13064                         /* If decap is followed by encap, handle it at encap. */
13065                         action_flags |= MLX5_FLOW_ACTION_DECAP;
13066                         break;
13067                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
13068                         dev_flow->dv.actions[actions_n++] =
13069                                 (void *)(uintptr_t)action->conf;
13070                         action_flags |= MLX5_FLOW_ACTION_JUMP;
13071                         break;
13072                 case RTE_FLOW_ACTION_TYPE_JUMP:
13073                         jump_group = ((const struct rte_flow_action_jump *)
13074                                                         action->conf)->group;
13075                         grp_info.std_tbl_fix = 0;
13076                         if (dev_flow->skip_scale &
13077                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
13078                                 grp_info.skip_scale = 1;
13079                         else
13080                                 grp_info.skip_scale = 0;
13081                         ret = mlx5_flow_group_to_table(dev, tunnel,
13082                                                        jump_group,
13083                                                        &table,
13084                                                        &grp_info, error);
13085                         if (ret)
13086                                 return ret;
13087                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
13088                                                        attr->transfer,
13089                                                        !!dev_flow->external,
13090                                                        tunnel, jump_group, 0,
13091                                                        0, error);
13092                         if (!tbl)
13093                                 return rte_flow_error_set
13094                                                 (error, errno,
13095                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13096                                                  NULL,
13097                                                  "cannot create jump action.");
13098                         if (flow_dv_jump_tbl_resource_register
13099                             (dev, tbl, dev_flow, error)) {
13100                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
13101                                 return rte_flow_error_set
13102                                                 (error, errno,
13103                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13104                                                  NULL,
13105                                                  "cannot create jump action.");
13106                         }
13107                         dev_flow->dv.actions[actions_n++] =
13108                                         dev_flow->dv.jump->action;
13109                         action_flags |= MLX5_FLOW_ACTION_JUMP;
13110                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
13111                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
13112                         num_of_dest++;
13113                         break;
13114                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
13115                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
13116                         if (flow_dv_convert_action_modify_mac
13117                                         (mhdr_res, actions, error))
13118                                 return -rte_errno;
13119                         action_flags |= actions->type ==
13120                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
13121                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
13122                                         MLX5_FLOW_ACTION_SET_MAC_DST;
13123                         break;
13124                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
13125                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
13126                         if (flow_dv_convert_action_modify_ipv4
13127                                         (mhdr_res, actions, error))
13128                                 return -rte_errno;
13129                         action_flags |= actions->type ==
13130                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
13131                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
13132                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
13133                         break;
13134                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
13135                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
13136                         if (flow_dv_convert_action_modify_ipv6
13137                                         (mhdr_res, actions, error))
13138                                 return -rte_errno;
13139                         action_flags |= actions->type ==
13140                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
13141                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
13142                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
13143                         break;
13144                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
13145                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
13146                         if (flow_dv_convert_action_modify_tp
13147                                         (mhdr_res, actions, items,
13148                                          &flow_attr, dev_flow, !!(action_flags &
13149                                          MLX5_FLOW_ACTION_DECAP), error))
13150                                 return -rte_errno;
13151                         action_flags |= actions->type ==
13152                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
13153                                         MLX5_FLOW_ACTION_SET_TP_SRC :
13154                                         MLX5_FLOW_ACTION_SET_TP_DST;
13155                         break;
13156                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
13157                         if (flow_dv_convert_action_modify_dec_ttl
13158                                         (mhdr_res, items, &flow_attr, dev_flow,
13159                                          !!(action_flags &
13160                                          MLX5_FLOW_ACTION_DECAP), error))
13161                                 return -rte_errno;
13162                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
13163                         break;
13164                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
13165                         if (flow_dv_convert_action_modify_ttl
13166                                         (mhdr_res, actions, items, &flow_attr,
13167                                          dev_flow, !!(action_flags &
13168                                          MLX5_FLOW_ACTION_DECAP), error))
13169                                 return -rte_errno;
13170                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
13171                         break;
13172                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
13173                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
13174                         if (flow_dv_convert_action_modify_tcp_seq
13175                                         (mhdr_res, actions, error))
13176                                 return -rte_errno;
13177                         action_flags |= actions->type ==
13178                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
13179                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
13180                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
13181                         break;
13182
13183                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
13184                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
13185                         if (flow_dv_convert_action_modify_tcp_ack
13186                                         (mhdr_res, actions, error))
13187                                 return -rte_errno;
13188                         action_flags |= actions->type ==
13189                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
13190                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
13191                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
13192                         break;
13193                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
13194                         if (flow_dv_convert_action_set_reg
13195                                         (mhdr_res, actions, error))
13196                                 return -rte_errno;
13197                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13198                         break;
13199                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
13200                         if (flow_dv_convert_action_copy_mreg
13201                                         (dev, mhdr_res, actions, error))
13202                                 return -rte_errno;
13203                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13204                         break;
13205                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
13206                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
13207                         dev_flow->handle->fate_action =
13208                                         MLX5_FLOW_FATE_DEFAULT_MISS;
13209                         break;
13210                 case RTE_FLOW_ACTION_TYPE_METER:
13211                         if (!wks->fm)
13212                                 return rte_flow_error_set(error, rte_errno,
13213                                         RTE_FLOW_ERROR_TYPE_ACTION,
13214                                         NULL, "Failed to get meter in flow.");
13215                         /* Set the meter action. */
13216                         dev_flow->dv.actions[actions_n++] =
13217                                 wks->fm->meter_action;
13218                         action_flags |= MLX5_FLOW_ACTION_METER;
13219                         break;
13220                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
13221                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
13222                                                               actions, error))
13223                                 return -rte_errno;
13224                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
13225                         break;
13226                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
13227                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
13228                                                               actions, error))
13229                                 return -rte_errno;
13230                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
13231                         break;
13232                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
13233                         sample_act_pos = actions_n;
13234                         sample = (const struct rte_flow_action_sample *)
13235                                  action->conf;
13236                         actions_n++;
13237                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
13238                         /* Put the encap action into the group action when combined with port_id. */
13239                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
13240                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
13241                                 sample_act->action_flags |=
13242                                                         MLX5_FLOW_ACTION_ENCAP;
13243                         break;
13244                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13245                         if (flow_dv_convert_action_modify_field
13246                                         (dev, mhdr_res, actions, attr, error))
13247                                 return -rte_errno;
13248                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
13249                         break;
13250                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
13251                         owner_idx = (uint32_t)(uintptr_t)action->conf;
13252                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
13253                         if (!ct)
13254                                 return rte_flow_error_set(error, EINVAL,
13255                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13256                                                 NULL,
13257                                                 "Failed to get CT object.");
13258                         if (mlx5_aso_ct_available(priv->sh, ct))
13259                                 return rte_flow_error_set(error, rte_errno,
13260                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13261                                                 NULL,
13262                                                 "CT is unavailable.");
13263                         if (ct->is_original)
13264                                 dev_flow->dv.actions[actions_n] =
13265                                                         ct->dr_action_orig;
13266                         else
13267                                 dev_flow->dv.actions[actions_n] =
13268                                                         ct->dr_action_rply;
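                        /* Bind the CT object to the flow on first use and
                         * take a reference on it.
                         */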
13269                         if (flow->ct == 0) {
13270                                 flow->indirect_type =
13271                                                 MLX5_INDIRECT_ACTION_TYPE_CT;
13272                                 flow->ct = owner_idx;
13273                                 __atomic_fetch_add(&ct->refcnt, 1,
13274                                                    __ATOMIC_RELAXED);
13275                         }
13276                         actions_n++;
13277                         action_flags |= MLX5_FLOW_ACTION_CT;
13278                         break;
13279                 case RTE_FLOW_ACTION_TYPE_END:
13280                         actions_end = true;
13281                         if (mhdr_res->actions_num) {
13282                                 /* Create the modify header action if needed. */
13283                                 if (flow_dv_modify_hdr_resource_register
13284                                         (dev, mhdr_res, dev_flow, error))
13285                                         return -rte_errno;
13286                                 dev_flow->dv.actions[modify_action_position] =
13287                                         handle->dvh.modify_hdr->action;
13288                         }
13289                         /*
13290                          * Handle the AGE and COUNT actions with a single
13291                          * HW counter when neither of them is shared.
13292                          */
13293                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
13294                                 if ((non_shared_age && count) ||
13295                                     !(priv->sh->flow_hit_aso_en &&
13296                                       (attr->group || attr->transfer))) {
13297                                         /* Implement the age action via counters. */
13298                                         cnt_act = flow_dv_prepare_counter
13299                                                                 (dev, dev_flow,
13300                                                                  flow, count,
13301                                                                  non_shared_age,
13302                                                                  error);
13303                                         if (!cnt_act)
13304                                                 return -rte_errno;
13305                                         dev_flow->dv.actions[age_act_pos] =
13306                                                                 cnt_act->action;
13307                                         break;
13308                                 }
13309                                 if (!flow->age && non_shared_age) {
13310                                         flow->age = flow_dv_aso_age_alloc
13311                                                                 (dev, error);
13312                                         if (!flow->age)
13313                                                 return -rte_errno;
13314                                         flow_dv_aso_age_params_init
13315                                                     (dev, flow->age,
13316                                                      non_shared_age->context ?
13317                                                      non_shared_age->context :
13318                                                      (void *)(uintptr_t)
13319                                                      (dev_flow->flow_idx),
13320                                                      non_shared_age->timeout);
13321                                 }
13322                                 age_act = flow_aso_age_get_by_idx(dev,
13323                                                                   flow->age);
13324                                 dev_flow->dv.actions[age_act_pos] =
13325                                                              age_act->dr_action;
13326                         }
13327                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13328                                 /*
13329                                  * Create one count action, to be used
13330                                  * by all sub-flows.
13331                                  */
13332                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
13333                                                                   flow, count,
13334                                                                   NULL, error);
13335                                 if (!cnt_act)
13336                                         return -rte_errno;
13337                                 dev_flow->dv.actions[actions_n++] =
13338                                                                 cnt_act->action;
13339                         }
13340                 default:
13341                         break;
13342                 }
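                /* Reserve one slot in dv.actions for the modify header
                 * action when the first header-modify action is seen;
                 * the slot is filled in once END is reached.
                 */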
13343                 if (mhdr_res->actions_num &&
13344                     modify_action_position == UINT32_MAX)
13345                         modify_action_position = actions_n++;
13346         }
13347         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
13348                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
13349                 int item_type = items->type;
13350
13351                 if (!mlx5_flow_os_item_supported(item_type))
13352                         return rte_flow_error_set(error, ENOTSUP,
13353                                                   RTE_FLOW_ERROR_TYPE_ITEM,
13354                                                   NULL, "item not supported");
13355                 switch (item_type) {
13356                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
13357                         flow_dv_translate_item_port_id
13358                                 (dev, match_mask, match_value, items, attr);
13359                         last_item = MLX5_FLOW_ITEM_PORT_ID;
13360                         break;
13361                 case RTE_FLOW_ITEM_TYPE_ETH:
13362                         flow_dv_translate_item_eth(match_mask, match_value,
13363                                                    items, tunnel,
13364                                                    dev_flow->dv.group);
13365                         matcher.priority = action_flags &
13366                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
13367                                         !dev_flow->external ?
13368                                         MLX5_PRIORITY_MAP_L3 :
13369                                         MLX5_PRIORITY_MAP_L2;
13370                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13371                                              MLX5_FLOW_LAYER_OUTER_L2;
13372                         break;
13373                 case RTE_FLOW_ITEM_TYPE_VLAN:
13374                         flow_dv_translate_item_vlan(dev_flow,
13375                                                     match_mask, match_value,
13376                                                     items, tunnel,
13377                                                     dev_flow->dv.group);
13378                         matcher.priority = MLX5_PRIORITY_MAP_L2;
13379                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13380                                               MLX5_FLOW_LAYER_INNER_VLAN) :
13381                                              (MLX5_FLOW_LAYER_OUTER_L2 |
13382                                               MLX5_FLOW_LAYER_OUTER_VLAN);
13383                         break;
13384                 case RTE_FLOW_ITEM_TYPE_IPV4:
13385                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13386                                                   &item_flags, &tunnel);
13387                         flow_dv_translate_item_ipv4(match_mask, match_value,
13388                                                     items, tunnel,
13389                                                     dev_flow->dv.group);
13390                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13391                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13392                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
13393                         if (items->mask != NULL &&
13394                             ((const struct rte_flow_item_ipv4 *)
13395                              items->mask)->hdr.next_proto_id) {
13396                                 next_protocol =
13397                                         ((const struct rte_flow_item_ipv4 *)
13398                                          (items->spec))->hdr.next_proto_id;
13399                                 next_protocol &=
13400                                         ((const struct rte_flow_item_ipv4 *)
13401                                          (items->mask))->hdr.next_proto_id;
13402                         } else {
13403                                 /* Reset for inner layer. */
13404                                 next_protocol = 0xff;
13405                         }
13406                         break;
13407                 case RTE_FLOW_ITEM_TYPE_IPV6:
13408                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13409                                                   &item_flags, &tunnel);
13410                         flow_dv_translate_item_ipv6(match_mask, match_value,
13411                                                     items, tunnel,
13412                                                     dev_flow->dv.group);
13413                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13414                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13415                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13416                         if (items->mask != NULL &&
13417                             ((const struct rte_flow_item_ipv6 *)
13418                              items->mask)->hdr.proto) {
13419                                 next_protocol =
13420                                         ((const struct rte_flow_item_ipv6 *)
13421                                          items->spec)->hdr.proto;
13422                                 next_protocol &=
13423                                         ((const struct rte_flow_item_ipv6 *)
13424                                          items->mask)->hdr.proto;
13425                         } else {
13426                                 /* Reset for inner layer. */
13427                                 next_protocol = 0xff;
13428                         }
13429                         break;
13430                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13431                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
13432                                                              match_value,
13433                                                              items, tunnel);
13434                         last_item = tunnel ?
13435                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13436                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13437                         if (items->mask != NULL &&
13438                             ((const struct rte_flow_item_ipv6_frag_ext *)
13439                              items->mask)->hdr.next_header) {
13440                                 next_protocol =
13441                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13442                                  items->spec)->hdr.next_header;
13443                                 next_protocol &=
13444                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13445                                  items->mask)->hdr.next_header;
13446                         } else {
13447                                 /* Reset for inner layer. */
13448                                 next_protocol = 0xff;
13449                         }
13450                         break;
13451                 case RTE_FLOW_ITEM_TYPE_TCP:
13452                         flow_dv_translate_item_tcp(match_mask, match_value,
13453                                                    items, tunnel);
13454                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13455                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13456                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
13457                         break;
13458                 case RTE_FLOW_ITEM_TYPE_UDP:
13459                         flow_dv_translate_item_udp(match_mask, match_value,
13460                                                    items, tunnel);
13461                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13462                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13463                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
13464                         break;
13465                 case RTE_FLOW_ITEM_TYPE_GRE:
13466                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13467                         last_item = MLX5_FLOW_LAYER_GRE;
13468                         tunnel_item = items;
13469                         break;
13470                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13471                         flow_dv_translate_item_gre_key(match_mask,
13472                                                        match_value, items);
13473                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
13474                         break;
13475                 case RTE_FLOW_ITEM_TYPE_NVGRE:
13476                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13477                         last_item = MLX5_FLOW_LAYER_GRE;
13478                         tunnel_item = items;
13479                         break;
13480                 case RTE_FLOW_ITEM_TYPE_VXLAN:
13481                         flow_dv_translate_item_vxlan(dev, attr,
13482                                                      match_mask, match_value,
13483                                                      items, tunnel);
13484                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13485                         last_item = MLX5_FLOW_LAYER_VXLAN;
13486                         break;
13487                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13488                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13489                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13490                         tunnel_item = items;
13491                         break;
13492                 case RTE_FLOW_ITEM_TYPE_GENEVE:
13493                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13494                         last_item = MLX5_FLOW_LAYER_GENEVE;
13495                         tunnel_item = items;
13496                         break;
13497                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13498                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13499                                                           match_value,
13500                                                           items, error);
13501                         if (ret)
13502                                 return rte_flow_error_set(error, -ret,
13503                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13504                                         "cannot create GENEVE TLV option");
13505                         flow->geneve_tlv_option = 1;
13506                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13507                         break;
13508                 case RTE_FLOW_ITEM_TYPE_MPLS:
13509                         flow_dv_translate_item_mpls(match_mask, match_value,
13510                                                     items, last_item, tunnel);
13511                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13512                         last_item = MLX5_FLOW_LAYER_MPLS;
13513                         break;
13514                 case RTE_FLOW_ITEM_TYPE_MARK:
13515                         flow_dv_translate_item_mark(dev, match_mask,
13516                                                     match_value, items);
13517                         last_item = MLX5_FLOW_ITEM_MARK;
13518                         break;
13519                 case RTE_FLOW_ITEM_TYPE_META:
13520                         flow_dv_translate_item_meta(dev, match_mask,
13521                                                     match_value, attr, items);
13522                         last_item = MLX5_FLOW_ITEM_METADATA;
13523                         break;
13524                 case RTE_FLOW_ITEM_TYPE_ICMP:
13525                         flow_dv_translate_item_icmp(match_mask, match_value,
13526                                                     items, tunnel);
13527                         last_item = MLX5_FLOW_LAYER_ICMP;
13528                         break;
13529                 case RTE_FLOW_ITEM_TYPE_ICMP6:
13530                         flow_dv_translate_item_icmp6(match_mask, match_value,
13531                                                       items, tunnel);
13532                         last_item = MLX5_FLOW_LAYER_ICMP6;
13533                         break;
13534                 case RTE_FLOW_ITEM_TYPE_TAG:
13535                         flow_dv_translate_item_tag(dev, match_mask,
13536                                                    match_value, items);
13537                         last_item = MLX5_FLOW_ITEM_TAG;
13538                         break;
13539                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13540                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
13541                                                         match_value, items);
13542                         last_item = MLX5_FLOW_ITEM_TAG;
13543                         break;
13544                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13545                         flow_dv_translate_item_tx_queue(dev, match_mask,
13546                                                         match_value,
13547                                                         items);
13548                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13549                         break;
13550                 case RTE_FLOW_ITEM_TYPE_GTP:
13551                         flow_dv_translate_item_gtp(match_mask, match_value,
13552                                                    items, tunnel);
13553                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13554                         last_item = MLX5_FLOW_LAYER_GTP;
13555                         break;
13556                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13557                         ret = flow_dv_translate_item_gtp_psc(match_mask,
13558                                                           match_value,
13559                                                           items);
13560                         if (ret)
13561                                 return rte_flow_error_set(error, -ret,
13562                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13563                                         "cannot create GTP PSC item");
13564                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
13565                         break;
13566                 case RTE_FLOW_ITEM_TYPE_ECPRI:
13567                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
13568                                 /* Create the parser only on first use. */
13569                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
13570                                 if (ret)
13571                                         return rte_flow_error_set
13572                                                 (error, -ret,
13573                                                 RTE_FLOW_ERROR_TYPE_ITEM,
13574                                                 NULL,
13575                                                 "cannot create eCPRI parser");
13576                         }
13577                         flow_dv_translate_item_ecpri(dev, match_mask,
13578                                                      match_value, items,
13579                                                      last_item);
13580                         /* No other protocol should follow eCPRI layer. */
13581                         last_item = MLX5_FLOW_LAYER_ECPRI;
13582                         break;
13583                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
13584                         flow_dv_translate_item_integrity(items, integrity_items,
13585                                                          &last_item);
13586                         break;
13587                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
13588                         flow_dv_translate_item_aso_ct(dev, match_mask,
13589                                                       match_value, items);
13590                         break;
13591                 case RTE_FLOW_ITEM_TYPE_FLEX:
13592                         flow_dv_translate_item_flex(dev, match_mask,
13593                                                     match_value, items,
13594                                                     dev_flow, tunnel != 0);
13595                         last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
13596                                     MLX5_FLOW_ITEM_OUTER_FLEX;
13597                         break;
13598                 default:
13599                         break;
13600                 }
13601                 item_flags |= last_item;
13602         }
13603         /*
13604          * When E-Switch mode is enabled, there are two cases where the
13605          * source port must be set manually: a NIC steering rule, and an
13606          * E-Switch rule in which no port_id item was found. In both
13607          * cases the source port is set according to the current port
13608          * in use.
13609          */
13610         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
13611             (priv->representor || priv->master)) {
13612                 if (flow_dv_translate_item_port_id(dev, match_mask,
13613                                                    match_value, NULL, attr))
13614                         return -rte_errno;
13615         }
13616         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
13617                 flow_dv_translate_item_integrity_post(match_mask, match_value,
13618                                                       integrity_items,
13619                                                       item_flags);
13620         }
13621         if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
13622                 flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
13623                                                  tunnel_item, item_flags);
13624         else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
13625                 flow_dv_translate_item_geneve(match_mask, match_value,
13626                                               tunnel_item, item_flags);
13627         else if (item_flags & MLX5_FLOW_LAYER_GRE) {
13628                 if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
13629                         flow_dv_translate_item_gre(match_mask, match_value,
13630                                                    tunnel_item, item_flags);
13631                 else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
13632                         flow_dv_translate_item_nvgre(match_mask, match_value,
13633                                                      tunnel_item, item_flags);
13634                 else
13635                         MLX5_ASSERT(false);
13636         }
13637 #ifdef RTE_LIBRTE_MLX5_DEBUG
13638         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13639                                               dev_flow->dv.value.buf));
13640 #endif
13641         /*
13642          * Layers may be already initialized from prefix flow if this dev_flow
13643          * is the suffix flow.
13644          */
13645         handle->layers |= item_flags;
13646         if (action_flags & MLX5_FLOW_ACTION_RSS)
13647                 flow_dv_hashfields_set(dev_flow, rss_desc);
13648         /* If the sample action contains an RSS action, the Sample/Mirror
13649          * resource must be registered after the hash fields are updated.
13650          */
13651         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13652                 ret = flow_dv_translate_action_sample(dev,
13653                                                       sample,
13654                                                       dev_flow, attr,
13655                                                       &num_of_dest,
13656                                                       sample_actions,
13657                                                       &sample_res,
13658                                                       error);
13659                 if (ret < 0)
13660                         return ret;
13661                 ret = flow_dv_create_action_sample(dev,
13662                                                    dev_flow,
13663                                                    num_of_dest,
13664                                                    &sample_res,
13665                                                    &mdest_res,
13666                                                    sample_actions,
13667                                                    action_flags,
13668                                                    error);
13669                 if (ret < 0)
13670                         return rte_flow_error_set
13671                                                 (error, rte_errno,
13672                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13673                                                 NULL,
13674                                                 "cannot create sample action");
13675                 if (num_of_dest > 1) {
13676                         dev_flow->dv.actions[sample_act_pos] =
13677                         dev_flow->dv.dest_array_res->action;
13678                 } else {
13679                         dev_flow->dv.actions[sample_act_pos] =
13680                         dev_flow->dv.sample_res->verbs_action;
13681                 }
13682         }
13683         /*
13684          * For multiple destinations (sample action with ratio=1), the
13685          * encap action and the port_id action are combined into a group
13686          * action, so the original actions must be removed from the flow
13687          * and the sample action used in their place.
13688          */
13689         if (num_of_dest > 1 &&
13690             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13691                 int i;
13692                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13693
13694                 for (i = 0; i < actions_n; i++) {
13695                         if ((sample_act->dr_encap_action &&
13696                                 sample_act->dr_encap_action ==
13697                                 dev_flow->dv.actions[i]) ||
13698                                 (sample_act->dr_port_id_action &&
13699                                 sample_act->dr_port_id_action ==
13700                                 dev_flow->dv.actions[i]) ||
13701                                 (sample_act->dr_jump_action &&
13702                                 sample_act->dr_jump_action ==
13703                                 dev_flow->dv.actions[i]))
13704                                 continue;
13705                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13706                 }
13707                 memcpy((void *)dev_flow->dv.actions,
13708                                 (void *)temp_actions,
13709                                 tmp_actions_n * sizeof(void *));
13710                 actions_n = tmp_actions_n;
13711         }
13712         dev_flow->dv.actions_n = actions_n;
13713         dev_flow->act_flags = action_flags;
13714         if (wks->skip_matcher_reg)
13715                 return 0;
13716         /* Register matcher. */
13717         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13718                                     matcher.mask.size);
13719         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13720                                                      matcher.priority,
13721                                                      dev_flow->external);
13722         /*
13723          * When creating a meter drop flow in the drop table with the
13724          * original 5-tuple match, the matcher priority must be lower
13725          * than that of the mtr_id matcher.
13726          */
13727         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13728             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
13729             matcher.priority <= MLX5_REG_BITS)
13730                 matcher.priority += MLX5_REG_BITS;
13731         /* The reserved field does not need to be set to 0 here. */
13732         tbl_key.is_fdb = attr->transfer;
13733         tbl_key.is_egress = attr->egress;
13734         tbl_key.level = dev_flow->dv.group;
13735         tbl_key.id = dev_flow->dv.table_id;
13736         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13737                                      tunnel, attr->group, error))
13738                 return -rte_errno;
13739         return 0;
13740 }
13741
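/*
 * Illustrative sketch (hypothetical values, not part of the driver): a
 * caller-side action list of the kind the translation loop above consumes.
 *
 *     struct rte_flow_action_mark mark = { .id = 0xbeef };
 *     struct rte_flow_action_jump jump = { .group = 2 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = NULL },
 *             { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *             { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
 *     };
 *
 * MARK registers a tag resource immediately, COUNT is only recorded in
 * action_flags and materialized when END is reached, and JUMP resolves
 * its target table before the DR jump action is appended.
 */
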
13742 /**
13743  * Set a hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13744  *
13745  * @param[in, out] action
13746  *   Shared RSS action holding hash RX queue objects.
13747  * @param[in] hash_fields
13748  *   Defines combination of packet fields to participate in RX hash.
13749  * @param[in] hrxq_idx
13750  *   Hash RX queue index to set.
13751  *
13752  * @return
13753  *   0 on success, -1 if the hash fields combination is not supported.
13754  */
13758 static int
13759 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13760                               const uint64_t hash_fields,
13761                               uint32_t hrxq_idx)
13762 {
13763         uint32_t *hrxqs = action->hrxq;
13764
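        /* Fixed slot layout, shared with __flow_dv_action_rss_hrxq_lookup():
         * [0] IPv4, [1] IPv4/TCP, [2] IPv4/UDP, [3] IPv6, [4] IPv6/TCP,
         * [5] IPv6/UDP, [6] no hash.
         */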
13765         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13766         case MLX5_RSS_HASH_IPV4:
13767                 /* fall-through. */
13768         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13769                 /* fall-through. */
13770         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13771                 hrxqs[0] = hrxq_idx;
13772                 return 0;
13773         case MLX5_RSS_HASH_IPV4_TCP:
13774                 /* fall-through. */
13775         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13776                 /* fall-through. */
13777         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13778                 hrxqs[1] = hrxq_idx;
13779                 return 0;
13780         case MLX5_RSS_HASH_IPV4_UDP:
13781                 /* fall-through. */
13782         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13783                 /* fall-through. */
13784         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13785                 hrxqs[2] = hrxq_idx;
13786                 return 0;
13787         case MLX5_RSS_HASH_IPV6:
13788                 /* fall-through. */
13789         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13790                 /* fall-through. */
13791         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13792                 hrxqs[3] = hrxq_idx;
13793                 return 0;
13794         case MLX5_RSS_HASH_IPV6_TCP:
13795                 /* fall-through. */
13796         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13797                 /* fall-through. */
13798         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13799                 hrxqs[4] = hrxq_idx;
13800                 return 0;
13801         case MLX5_RSS_HASH_IPV6_UDP:
13802                 /* fall-through. */
13803         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13804                 /* fall-through. */
13805         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13806                 hrxqs[5] = hrxq_idx;
13807                 return 0;
13808         case MLX5_RSS_HASH_NONE:
13809                 hrxqs[6] = hrxq_idx;
13810                 return 0;
13811         default:
13812                 return -1;
13813         }
13814 }
13815
13816 /**
13817  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13818  *
13819  * @param[in] dev
13820  *   Pointer to the Ethernet device structure.
13821  * @param[in] idx
13822  *   Shared RSS action ID holding hash RX queue objects.
13823  * @param[in] hash_fields
13824  *   Defines combination of packet fields to participate in RX hash.
13825  *
13826  * @return
13827  *   Valid hash RX queue index, otherwise 0.
13828  */
13832 static uint32_t
13833 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13834                                  const uint64_t hash_fields)
13835 {
13836         struct mlx5_priv *priv = dev->data->dev_private;
13837         struct mlx5_shared_action_rss *shared_rss =
13838             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13839         const uint32_t *hrxqs;

        /* Defensive check: a stale index yields no shared RSS object. */
        if (!shared_rss)
                return 0;
        hrxqs = shared_rss->hrxq;
13840
13841         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13842         case MLX5_RSS_HASH_IPV4:
13843                 /* fall-through. */
13844         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13845                 /* fall-through. */
13846         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13847                 return hrxqs[0];
13848         case MLX5_RSS_HASH_IPV4_TCP:
13849                 /* fall-through. */
13850         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13851                 /* fall-through. */
13852         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13853                 return hrxqs[1];
13854         case MLX5_RSS_HASH_IPV4_UDP:
13855                 /* fall-through. */
13856         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13857                 /* fall-through. */
13858         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13859                 return hrxqs[2];
13860         case MLX5_RSS_HASH_IPV6:
13861                 /* fall-through. */
13862         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13863                 /* fall-through. */
13864         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13865                 return hrxqs[3];
13866         case MLX5_RSS_HASH_IPV6_TCP:
13867                 /* fall-through. */
13868         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13869                 /* fall-through. */
13870         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13871                 return hrxqs[4];
13872         case MLX5_RSS_HASH_IPV6_UDP:
13873                 /* fall-through. */
13874         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13875                 /* fall-through. */
13876         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13877                 return hrxqs[5];
13878         case MLX5_RSS_HASH_NONE:
13879                 return hrxqs[6];
13880         default:
13881                 return 0;
13882         }
13884 }
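
/*
 * Companion sketch (illustrative; "dev" and "srss_idx" are assumed to
 * be valid): the IBV_RX_HASH_INNER bit is masked off before matching,
 * so inner and outer variants of one L3/L4 combination resolve to the
 * same slot.
 *
 *   uint32_t hrxq_idx = __flow_dv_action_rss_hrxq_lookup
 *           (dev, srss_idx, IBV_RX_HASH_INNER | MLX5_RSS_HASH_IPV4_UDP);
 *
 * This returns the same index as a lookup with plain
 * MLX5_RSS_HASH_IPV4_UDP (slot 2).
 */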
13885
13886 /**
13887  * Apply the flow to the NIC, lock free
13888  * (mutex should be acquired by the caller).
13889  *
13890  * @param[in] dev
13891  *   Pointer to the Ethernet device structure.
13892  * @param[in, out] flow
13893  *   Pointer to flow structure.
13894  * @param[out] error
13895  *   Pointer to error structure.
13896  *
13897  * @return
13898  *   0 on success, a negative errno value otherwise and rte_errno is set.
13899  */
13900 static int
13901 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13902               struct rte_flow_error *error)
13903 {
13904         struct mlx5_flow_dv_workspace *dv;
13905         struct mlx5_flow_handle *dh;
13906         struct mlx5_flow_handle_dv *dv_h;
13907         struct mlx5_flow *dev_flow;
13908         struct mlx5_priv *priv = dev->data->dev_private;
13909         uint32_t handle_idx;
13910         int n;
13911         int err;
13912         int idx;
13913         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13914         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13915         uint8_t misc_mask;
13916
13917         MLX5_ASSERT(wks);
13918         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13919                 dev_flow = &wks->flows[idx];
13920                 dv = &dev_flow->dv;
13921                 dh = dev_flow->handle;
13922                 dv_h = &dh->dvh;
13923                 n = dv->actions_n;
13924                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13925                         if (dv->transfer) {
13926                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13927                                 dv->actions[n++] = priv->sh->dr_drop_action;
13928                         } else {
13929 #ifdef HAVE_MLX5DV_DR
13930                                 /* DR supports drop action placeholder. */
13931                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13932                                 dv->actions[n++] = dv->group ?
13933                                         priv->sh->dr_drop_action :
13934                                         priv->root_drop_action;
13935 #else
13936                                 /* For DV we use the explicit drop queue. */
13937                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13938                                 dv->actions[n++] =
13939                                                 priv->drop_queue.hrxq->action;
13940 #endif
13941                         }
13942                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13943                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
13944                         struct mlx5_hrxq *hrxq;
13945                         uint32_t hrxq_idx;
13946
13947                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13948                                                     &hrxq_idx);
13949                         if (!hrxq) {
13950                                 rte_flow_error_set
13951                                         (error, rte_errno,
13952                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13953                                          "cannot get hash queue");
13954                                 goto error;
13955                         }
13956                         dh->rix_hrxq = hrxq_idx;
13957                         dv->actions[n++] = hrxq->action;
13958                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13959                         struct mlx5_hrxq *hrxq = NULL;
13960                         uint32_t hrxq_idx;
13961
13962                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13963                                                 rss_desc->shared_rss,
13964                                                 dev_flow->hash_fields);
13965                         if (hrxq_idx)
13966                                 hrxq = mlx5_ipool_get
13967                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13968                                          hrxq_idx);
13969                         if (!hrxq) {
13970                                 rte_flow_error_set
13971                                         (error, rte_errno,
13972                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13973                                          "cannot get hash queue");
13974                                 goto error;
13975                         }
13976                         dh->rix_srss = rss_desc->shared_rss;
13977                         dv->actions[n++] = hrxq->action;
13978                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13979                         if (!priv->sh->default_miss_action) {
13980                                 rte_flow_error_set
13981                                         (error, rte_errno,
13982                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13983                                          "default miss action not created.");
13984                                 goto error;
13985                         }
13986                         dv->actions[n++] = priv->sh->default_miss_action;
13987                 }
13988                 misc_mask = flow_dv_matcher_enable(dv->value.buf);
13989                 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
13990                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13991                                                (void *)&dv->value, n,
13992                                                dv->actions, &dh->drv_flow);
13993                 if (err) {
13994                         rte_flow_error_set
13995                                 (error, errno,
13996                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13997                                 NULL,
13998                                 (!priv->config.allow_duplicate_pattern &&
13999                                 errno == EEXIST) ?
14000                                 "duplicating pattern is not allowed" :
14001                                 "hardware refuses to create flow");
14002                         goto error;
14003                 }
14004                 if (priv->vmwa_context &&
14005                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
14006                         /*
14007                          * The rule contains the VLAN pattern.
14008                          * For a VF we are going to create a VLAN
14009                          * interface to make the hypervisor set the
14010                          * correct e-Switch vport context.
14011                          */
14012                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
14013                 }
14014         }
14015         return 0;
14016 error:
14017         err = rte_errno; /* Save rte_errno before cleanup. */
14018         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
14019                        handle_idx, dh, next) {
14020                 /* hrxq is a union; don't clear it if the flag is not set. */
14021                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
14022                         mlx5_hrxq_release(dev, dh->rix_hrxq);
14023                         dh->rix_hrxq = 0;
14024                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
14025                         dh->rix_srss = 0;
14026                 }
14027                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14028                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14029         }
14030         rte_errno = err; /* Restore rte_errno. */
14031         return -rte_errno;
14032 }
14033
14034 void
14035 flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
14036                           struct mlx5_list_entry *entry)
14037 {
14038         struct mlx5_flow_dv_matcher *resource = container_of(entry,
14039                                                              typeof(*resource),
14040                                                              entry);
14041
14042         claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
14043         mlx5_free(resource);
14044 }
14045
14046 /**
14047  * Release the flow matcher.
14048  *
14049  * @param dev
14050  *   Pointer to Ethernet device.
14051  * @param handle
14052  *   Pointer to mlx5_flow_handle.
14053  *
14054  * @return
14055  *   1 while a reference on it exists, 0 when freed.
14056  */
14057 static int
14058 flow_dv_matcher_release(struct rte_eth_dev *dev,
14059                         struct mlx5_flow_handle *handle)
14060 {
14061         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
14062         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
14063                                                             typeof(*tbl), tbl);
14064         int ret;
14065
14066         MLX5_ASSERT(matcher->matcher_object);
14067         ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
14068         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
14069         return ret;
14070 }
14071
14072 void
14073 flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14074 {
14075         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14076         struct mlx5_flow_dv_encap_decap_resource *res =
14077                                        container_of(entry, typeof(*res), entry);
14078
14079         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14080         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
14081 }
14082
14083 /**
14084  * Release an encap/decap resource.
14085  *
14086  * @param dev
14087  *   Pointer to Ethernet device.
14088  * @param encap_decap_idx
14089  *   Index of encap decap resource.
14090  *
14091  * @return
14092  *   1 while a reference on it exists, 0 when freed.
14093  */
14094 static int
14095 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
14096                                      uint32_t encap_decap_idx)
14097 {
14098         struct mlx5_priv *priv = dev->data->dev_private;
14099         struct mlx5_flow_dv_encap_decap_resource *resource;
14100
14101         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
14102                                   encap_decap_idx);
14103         if (!resource)
14104                 return 0;
14105         MLX5_ASSERT(resource->action);
14106         return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
14107 }
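
/*
 * Note on the return convention shared by the release helpers in this
 * file: mlx5_hlist_unregister()/mlx5_list_unregister() return 1 while
 * other references remain and 0 once the entry is freed. A minimal
 * caller-side sketch ("dev" and "idx" are assumed valid):
 *
 *   if (flow_dv_encap_decap_resource_release(dev, idx) == 0) {
 *           // last reference dropped; the HW action was destroyed
 *   }
 */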
14108
14109 /**
14110  * Release a jump to table action resource.
14111  *
14112  * @param dev
14113  *   Pointer to Ethernet device.
14114  * @param rix_jump
14115  *   Index to the jump action resource.
14116  *
14117  * @return
14118  *   1 while a reference on it exists, 0 when freed.
14119  */
14120 static int
14121 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
14122                                   uint32_t rix_jump)
14123 {
14124         struct mlx5_priv *priv = dev->data->dev_private;
14125         struct mlx5_flow_tbl_data_entry *tbl_data;
14126
14127         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
14128                                   rix_jump);
14129         if (!tbl_data)
14130                 return 0;
14131         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
14132 }
14133
14134 void
14135 flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14136 {
14137         struct mlx5_flow_dv_modify_hdr_resource *res =
14138                 container_of(entry, typeof(*res), entry);
14139         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14140
14141         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14142         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
14143 }
14144
14145 /**
14146  * Release a modify-header resource.
14147  *
14148  * @param dev
14149  *   Pointer to Ethernet device.
14150  * @param handle
14151  *   Pointer to mlx5_flow_handle.
14152  *
14153  * @return
14154  *   1 while a reference on it exists, 0 when freed.
14155  */
14156 static int
14157 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
14158                                     struct mlx5_flow_handle *handle)
14159 {
14160         struct mlx5_priv *priv = dev->data->dev_private;
14161         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
14162
14163         MLX5_ASSERT(entry->action);
14164         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
14165 }
14166
14167 void
14168 flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14169 {
14170         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14171         struct mlx5_flow_dv_port_id_action_resource *resource =
14172                                   container_of(entry, typeof(*resource), entry);
14173
14174         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14175         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
14176 }
14177
14178 /**
14179  * Release port ID action resource.
14180  *
14181  * @param dev
14182  *   Pointer to Ethernet device.
14183  * @param handle
14184  *   Pointer to mlx5_flow_handle.
14185  *
14186  * @return
14187  *   1 while a reference on it exists, 0 when freed.
14188  */
14189 static int
14190 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
14191                                         uint32_t port_id)
14192 {
14193         struct mlx5_priv *priv = dev->data->dev_private;
14194         struct mlx5_flow_dv_port_id_action_resource *resource;
14195
14196         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
14197         if (!resource)
14198                 return 0;
14199         MLX5_ASSERT(resource->action);
14200         return mlx5_list_unregister(priv->sh->port_id_action_list,
14201                                     &resource->entry);
14202 }
14203
14204 /**
14205  * Release shared RSS action resource.
14206  *
14207  * @param dev
14208  *   Pointer to Ethernet device.
14209  * @param srss
14210  *   Shared RSS action index.
14211  */
14212 static void
14213 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
14214 {
14215         struct mlx5_priv *priv = dev->data->dev_private;
14216         struct mlx5_shared_action_rss *shared_rss;
14217
14218         shared_rss = mlx5_ipool_get
14219                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
14220         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14221 }
14222
14223 void
14224 flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14225 {
14226         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14227         struct mlx5_flow_dv_push_vlan_action_resource *resource =
14228                         container_of(entry, typeof(*resource), entry);
14229
14230         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14231         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
14232 }
14233
14234 /**
14235  * Release push vlan action resource.
14236  *
14237  * @param dev
14238  *   Pointer to Ethernet device.
14239  * @param handle
14240  *   Pointer to mlx5_flow_handle.
14241  *
14242  * @return
14243  *   1 while a reference on it exists, 0 when freed.
14244  */
14245 static int
14246 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
14247                                           struct mlx5_flow_handle *handle)
14248 {
14249         struct mlx5_priv *priv = dev->data->dev_private;
14250         struct mlx5_flow_dv_push_vlan_action_resource *resource;
14251         uint32_t idx = handle->dvh.rix_push_vlan;
14252
14253         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
14254         if (!resource)
14255                 return 0;
14256         MLX5_ASSERT(resource->action);
14257         return mlx5_list_unregister(priv->sh->push_vlan_action_list,
14258                                     &resource->entry);
14259 }
14260
14261 /**
14262  * Release the fate resource.
14263  *
14264  * @param dev
14265  *   Pointer to Ethernet device.
14266  * @param handle
14267  *   Pointer to mlx5_flow_handle.
14268  */
14269 static void
14270 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
14271                                struct mlx5_flow_handle *handle)
14272 {
14273         if (!handle->rix_fate)
14274                 return;
14275         switch (handle->fate_action) {
14276         case MLX5_FLOW_FATE_QUEUE:
14277                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
14278                         mlx5_hrxq_release(dev, handle->rix_hrxq);
14279                 break;
14280         case MLX5_FLOW_FATE_JUMP:
14281                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
14282                 break;
14283         case MLX5_FLOW_FATE_PORT_ID:
14284                 flow_dv_port_id_action_resource_release(dev,
14285                                 handle->rix_port_id_action);
14286                 break;
14287         default:
14288                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
14289                 break;
14290         }
14291         handle->rix_fate = 0;
14292 }
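
/*
 * The rix_* fields used above alias one union member (rix_fate) and
 * are selected by handle->fate_action; a hedged illustration with a
 * hypothetical "jump_idx":
 *
 *   handle->fate_action = MLX5_FLOW_FATE_JUMP;
 *   handle->rix_jump = jump_idx;
 *   flow_dv_fate_resource_release(dev, handle);
 *   MLX5_ASSERT(handle->rix_fate == 0);
 */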
14293
14294 void
14295 flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
14296                          struct mlx5_list_entry *entry)
14297 {
14298         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
14299                                                               typeof(*resource),
14300                                                               entry);
14301         struct rte_eth_dev *dev = resource->dev;
14302         struct mlx5_priv *priv = dev->data->dev_private;
14303
14304         if (resource->verbs_action)
14305                 claim_zero(mlx5_flow_os_destroy_flow_action
14306                                                       (resource->verbs_action));
14307         if (resource->normal_path_tbl)
14308                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14309                                              resource->normal_path_tbl);
14310         flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
14311         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
14312         DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
14313 }
14314
14315 /**
14316  * Release a sample resource.
14317  *
14318  * @param dev
14319  *   Pointer to Ethernet device.
14320  * @param handle
14321  *   Pointer to mlx5_flow_handle.
14322  *
14323  * @return
14324  *   1 while a reference on it exists, 0 when freed.
14325  */
14326 static int
14327 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
14328                                      struct mlx5_flow_handle *handle)
14329 {
14330         struct mlx5_priv *priv = dev->data->dev_private;
14331         struct mlx5_flow_dv_sample_resource *resource;
14332
14333         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
14334                                   handle->dvh.rix_sample);
14335         if (!resource)
14336                 return 0;
14337         MLX5_ASSERT(resource->verbs_action);
14338         return mlx5_list_unregister(priv->sh->sample_action_list,
14339                                     &resource->entry);
14340 }
14341
14342 void
14343 flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
14344                              struct mlx5_list_entry *entry)
14345 {
14346         struct mlx5_flow_dv_dest_array_resource *resource =
14347                         container_of(entry, typeof(*resource), entry);
14348         struct rte_eth_dev *dev = resource->dev;
14349         struct mlx5_priv *priv = dev->data->dev_private;
14350         uint32_t i = 0;
14351
14352         MLX5_ASSERT(resource->action);
14353         if (resource->action)
14354                 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14355         for (; i < resource->num_of_dest; i++)
14356                 flow_dv_sample_sub_actions_release(dev,
14357                                                    &resource->sample_idx[i]);
14358         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
14359         DRV_LOG(DEBUG, "destination array resource %p: removed",
14360                 (void *)resource);
14361 }
14362
14363 /**
14364  * Release a destination array resource.
14365  *
14366  * @param dev
14367  *   Pointer to Ethernet device.
14368  * @param handle
14369  *   Pointer to mlx5_flow_handle.
14370  *
14371  * @return
14372  *   1 while a reference on it exists, 0 when freed.
14373  */
14374 static int
14375 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14376                                     struct mlx5_flow_handle *handle)
14377 {
14378         struct mlx5_priv *priv = dev->data->dev_private;
14379         struct mlx5_flow_dv_dest_array_resource *resource;
14380
14381         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14382                                   handle->dvh.rix_dest_array);
14383         if (!resource)
14384                 return 0;
14385         MLX5_ASSERT(resource->action);
14386         return mlx5_list_unregister(priv->sh->dest_array_list,
14387                                     &resource->entry);
14388 }
14389
14390 static void
14391 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
14392 {
14393         struct mlx5_priv *priv = dev->data->dev_private;
14394         struct mlx5_dev_ctx_shared *sh = priv->sh;
14395         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
14396                                 sh->geneve_tlv_option_resource;
14397         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
14398         if (geneve_opt_resource) {
14399                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
14400                                          __ATOMIC_RELAXED))) {
14401                         claim_zero(mlx5_devx_cmd_destroy
14402                                         (geneve_opt_resource->obj));
14403                         mlx5_free(sh->geneve_tlv_option_resource);
14404                         sh->geneve_tlv_option_resource = NULL;
14405                 }
14406         }
14407         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
14408 }
14409
14410 /**
14411  * Remove the flow from the NIC but keep it in memory.
14412  * Lock free (mutex should be acquired by the caller).
14413  *
14414  * @param[in] dev
14415  *   Pointer to Ethernet device.
14416  * @param[in, out] flow
14417  *   Pointer to flow structure.
14418  */
14419 static void
14420 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14421 {
14422         struct mlx5_flow_handle *dh;
14423         uint32_t handle_idx;
14424         struct mlx5_priv *priv = dev->data->dev_private;
14425
14426         if (!flow)
14427                 return;
14428         handle_idx = flow->dev_handles;
14429         while (handle_idx) {
14430                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14431                                     handle_idx);
14432                 if (!dh)
14433                         return;
14434                 if (dh->drv_flow) {
14435                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14436                         dh->drv_flow = NULL;
14437                 }
14438                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14439                         flow_dv_fate_resource_release(dev, dh);
14440                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14441                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14442                 handle_idx = dh->next.next;
14443         }
14444 }
14445
14446 /**
14447  * Remove the flow from the NIC and from memory.
14448  * Lock free (mutex should be acquired by the caller).
14449  *
14450  * @param[in] dev
14451  *   Pointer to the Ethernet device structure.
14452  * @param[in, out] flow
14453  *   Pointer to flow structure.
14454  */
14455 static void
14456 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
14457 {
14458         struct mlx5_flow_handle *dev_handle;
14459         struct mlx5_priv *priv = dev->data->dev_private;
14460         struct mlx5_flow_meter_info *fm = NULL;
14461         uint32_t srss = 0;
14462
14463         if (!flow)
14464                 return;
14465         flow_dv_remove(dev, flow);
14466         if (flow->counter) {
14467                 flow_dv_counter_free(dev, flow->counter);
14468                 flow->counter = 0;
14469         }
14470         if (flow->meter) {
14471                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
14472                 if (fm)
14473                         mlx5_flow_meter_detach(priv, fm);
14474                 flow->meter = 0;
14475         }
14476         /* Keep the current age handling by default. */
14477         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
14478                 flow_dv_aso_ct_release(dev, flow->ct, NULL);
14479         else if (flow->age)
14480                 flow_dv_aso_age_release(dev, flow->age);
14481         if (flow->geneve_tlv_option) {
14482                 flow_dv_geneve_tlv_option_resource_release(dev);
14483                 flow->geneve_tlv_option = 0;
14484         }
14485         while (flow->dev_handles) {
14486                 uint32_t tmp_idx = flow->dev_handles;
14487
14488                 dev_handle = mlx5_ipool_get(priv->sh->ipool
14489                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
14490                 if (!dev_handle)
14491                         return;
14492                 flow->dev_handles = dev_handle->next.next;
14493                 while (dev_handle->flex_item) {
14494                         int index = rte_bsf32(dev_handle->flex_item);
14495
14496                         mlx5_flex_release_index(dev, index);
14497                         dev_handle->flex_item &= ~RTE_BIT32(index);
14498                 }
14499                 if (dev_handle->dvh.matcher)
14500                         flow_dv_matcher_release(dev, dev_handle);
14501                 if (dev_handle->dvh.rix_sample)
14502                         flow_dv_sample_resource_release(dev, dev_handle);
14503                 if (dev_handle->dvh.rix_dest_array)
14504                         flow_dv_dest_array_resource_release(dev, dev_handle);
14505                 if (dev_handle->dvh.rix_encap_decap)
14506                         flow_dv_encap_decap_resource_release(dev,
14507                                 dev_handle->dvh.rix_encap_decap);
14508                 if (dev_handle->dvh.modify_hdr)
14509                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
14510                 if (dev_handle->dvh.rix_push_vlan)
14511                         flow_dv_push_vlan_action_resource_release(dev,
14512                                                                   dev_handle);
14513                 if (dev_handle->dvh.rix_tag)
14514                         flow_dv_tag_release(dev,
14515                                             dev_handle->dvh.rix_tag);
14516                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
14517                         flow_dv_fate_resource_release(dev, dev_handle);
14518                 else if (!srss)
14519                         srss = dev_handle->rix_srss;
14520                 if (fm && dev_handle->is_meter_flow_id &&
14521                     dev_handle->split_flow_id)
14522                         mlx5_ipool_free(fm->flow_ipool,
14523                                         dev_handle->split_flow_id);
14524                 else if (dev_handle->split_flow_id &&
14525                     !dev_handle->is_meter_flow_id)
14526                         mlx5_ipool_free(priv->sh->ipool
14527                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
14528                                         dev_handle->split_flow_id);
14529                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14530                            tmp_idx);
14531         }
14532         if (srss)
14533                 flow_dv_shared_rss_action_release(dev, srss);
14534 }
14535
14536 /**
14537  * Release array of hash RX queue objects.
14538  * Helper function.
14539  *
14540  * @param[in] dev
14541  *   Pointer to the Ethernet device structure.
14542  * @param[in, out] hrxqs
14543  *   Array of hash RX queue objects.
14544  *
14545  * @return
14546  *   Total number of references to hash RX queue objects in *hrxqs* array
14547  *   after this operation.
14548  */
14549 static int
14550 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14551                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14552 {
14553         size_t i;
14554         int remaining = 0;
14555
14556         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14557                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14558
14559                 if (!ret)
14560                         (*hrxqs)[i] = 0;
14561                 remaining += ret;
14562         }
14563         return remaining;
14564 }
14565
14566 /**
14567  * Release all hash RX queue objects representing shared RSS action.
14568  *
14569  * @param[in] dev
14570  *   Pointer to the Ethernet device structure.
14571  * @param[in, out] action
14572  *   Shared RSS action to remove hash RX queue objects from.
14573  *
14574  * @return
14575  *   Total number of references to hash RX queue objects stored in *action*
14576  *   after this operation.
14577  *   Expected to be 0 if no external references held.
14578  */
14579 static int
14580 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
14581                                  struct mlx5_shared_action_rss *shared_rss)
14582 {
14583         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
14584 }
14585
14586 /**
14587  * Adjust the L3/L4 hash value of a pre-created shared RSS hrxq
14588  * according to user input.
14589  *
14590  * Only one hash value is available for one L3+L4 combination.
14591  * For example, MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY and
14592  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can
14593  * share the same slot in mlx5_rss_hash_fields.
14595  *
14596  * @param[in] rss
14597  *   Pointer to the shared action RSS conf.
14598  * @param[in, out] hash_field
14599  *   hash_field variable to be adjusted.
14600  *
14601  * @return
14602  *   void
14603  */
14604 static void
14605 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
14606                                      uint64_t *hash_field)
14607 {
14608         uint64_t rss_types = rss->origin.types;
14609
14610         switch (*hash_field & ~IBV_RX_HASH_INNER) {
14611         case MLX5_RSS_HASH_IPV4:
14612                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
14613                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
14614                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14615                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
14616                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14617                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
14618                         else
14619                                 *hash_field |= MLX5_RSS_HASH_IPV4;
14620                 }
14621                 return;
14622         case MLX5_RSS_HASH_IPV6:
14623                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
14624                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
14625                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14626                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
14627                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14628                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
14629                         else
14630                                 *hash_field |= MLX5_RSS_HASH_IPV6;
14631                 }
14632                 return;
14633         case MLX5_RSS_HASH_IPV4_UDP:
14634                 /* fall-through. */
14635         case MLX5_RSS_HASH_IPV6_UDP:
14636                 if (rss_types & RTE_ETH_RSS_UDP) {
14637                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
14638                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14639                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
14640                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14641                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
14642                         else
14643                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14644                 }
14645                 return;
14646         case MLX5_RSS_HASH_IPV4_TCP:
14647                 /* fall-through. */
14648         case MLX5_RSS_HASH_IPV6_TCP:
14649                 if (rss_types & RTE_ETH_RSS_TCP) {
14650                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14651                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14652                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14653                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14654                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14655                         else
14656                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14657                 }
14658                 return;
14659         default:
14660                 return;
14661         }
14662 }
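
/*
 * Worked example (hedged; "rss" is assumed to carry the user's origin
 * configuration): with rss->origin.types ==
 * (RTE_ETH_RSS_IP | RTE_ETH_RSS_L3_SRC_ONLY), a pre-computed field of
 * MLX5_RSS_HASH_IPV4 is narrowed to the source half only:
 *
 *   uint64_t hf = MLX5_RSS_HASH_IPV4;
 *
 *   __flow_dv_action_rss_l34_hash_adjust(rss, &hf);
 *   MLX5_ASSERT(hf == IBV_RX_HASH_SRC_IPV4);
 */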
14663
14664 /**
14665  * Setup shared RSS action.
14666  * Prepare a set of hash RX queue objects sufficient to handle all valid
14667  * hash_fields combinations (see enum ibv_rx_hash_fields).
14668  *
14669  * @param[in] dev
14670  *   Pointer to the Ethernet device structure.
14671  * @param[in] action_idx
14672  *   Shared RSS action ipool index.
14673  * @param[in, out] action
14674  *   Partially initialized shared RSS action.
14675  * @param[out] error
14676  *   Perform verbose error reporting if not NULL. Initialized in case of
14677  *   error only.
14678  *
14679  * @return
14680  *   0 on success, otherwise negative errno value.
14681  */
14682 static int
14683 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14684                            uint32_t action_idx,
14685                            struct mlx5_shared_action_rss *shared_rss,
14686                            struct rte_flow_error *error)
14687 {
14688         struct mlx5_flow_rss_desc rss_desc = { 0 };
14689         size_t i;
14690         int err;
14691
14692         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl,
14693                                      !!dev->data->dev_started)) {
14694                 return rte_flow_error_set(error, rte_errno,
14695                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14696                                           "cannot setup indirection table");
14697         }
14698         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14699         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14700         rss_desc.const_q = shared_rss->origin.queue;
14701         rss_desc.queue_num = shared_rss->origin.queue_num;
14702         /* Set non-zero value to indicate a shared RSS. */
14703         rss_desc.shared_rss = action_idx;
14704         rss_desc.ind_tbl = shared_rss->ind_tbl;
14705         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14706                 uint32_t hrxq_idx;
14707                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14708                 int tunnel = 0;
14709
14710                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14711                 if (shared_rss->origin.level > 1) {
14712                         hash_fields |= IBV_RX_HASH_INNER;
14713                         tunnel = 1;
14714                 }
14715                 rss_desc.tunnel = tunnel;
14716                 rss_desc.hash_fields = hash_fields;
14717                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14718                 if (!hrxq_idx) {
14719                         rte_flow_error_set
14720                                 (error, rte_errno,
14721                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14722                                  "cannot get hash queue");
14723                         goto error_hrxq_new;
14724                 }
14725                 err = __flow_dv_action_rss_hrxq_set
14726                         (shared_rss, hash_fields, hrxq_idx);
14727                 MLX5_ASSERT(!err);
14728         }
14729         return 0;
14730 error_hrxq_new:
14731         err = rte_errno;
14732         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14733         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true))
14734                 shared_rss->ind_tbl = NULL;
14735         rte_errno = err;
14736         return -rte_errno;
14737 }
14738
14739 /**
14740  * Create shared RSS action.
14741  *
14742  * @param[in] dev
14743  *   Pointer to the Ethernet device structure.
14744  * @param[in] conf
14745  *   Shared action configuration.
14746  * @param[in] rss
14747  *   RSS action specification used to create shared action.
14748  * @param[out] error
14749  *   Perform verbose error reporting if not NULL. Initialized in case of
14750  *   error only.
14751  *
14752  * @return
14753  *   A valid shared action ID in case of success, 0 otherwise and
14754  *   rte_errno is set.
14755  */
14756 static uint32_t
14757 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14758                             const struct rte_flow_indir_action_conf *conf,
14759                             const struct rte_flow_action_rss *rss,
14760                             struct rte_flow_error *error)
14761 {
14762         struct mlx5_priv *priv = dev->data->dev_private;
14763         struct mlx5_shared_action_rss *shared_rss = NULL;
14764         void *queue = NULL;
14765         struct rte_flow_action_rss *origin;
14766         const uint8_t *rss_key;
14767         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14768         uint32_t idx;
14769
14770         RTE_SET_USED(conf);
14771         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14772                             0, SOCKET_ID_ANY);
14773         shared_rss = mlx5_ipool_zmalloc
14774                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14775         if (!shared_rss || !queue) {
14776                 rte_flow_error_set(error, ENOMEM,
14777                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14778                                    "cannot allocate resource memory");
14779                 goto error_rss_init;
14780         }
14781         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14782                 rte_flow_error_set(error, E2BIG,
14783                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14784                                    "rss action number out of range");
14785                 goto error_rss_init;
14786         }
14787         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14788                                           sizeof(*shared_rss->ind_tbl),
14789                                           0, SOCKET_ID_ANY);
14790         if (!shared_rss->ind_tbl) {
14791                 rte_flow_error_set(error, ENOMEM,
14792                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14793                                    "cannot allocate resource memory");
14794                 goto error_rss_init;
14795         }
14796         memcpy(queue, rss->queue, queue_size);
14797         shared_rss->ind_tbl->queues = queue;
14798         shared_rss->ind_tbl->queues_n = rss->queue_num;
14799         origin = &shared_rss->origin;
14800         origin->func = rss->func;
14801         origin->level = rss->level;
14802         /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
14803         origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
14804         /* NULL RSS key indicates default RSS key. */
14805         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14806         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14807         origin->key = &shared_rss->key[0];
14808         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14809         origin->queue = queue;
14810         origin->queue_num = rss->queue_num;
14811         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14812                 goto error_rss_init;
14813         rte_spinlock_init(&shared_rss->action_rss_sl);
14814         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14815         rte_spinlock_lock(&priv->shared_act_sl);
14816         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14817                      &priv->rss_shared_actions, idx, shared_rss, next);
14818         rte_spinlock_unlock(&priv->shared_act_sl);
14819         return idx;
14820 error_rss_init:
14821         if (shared_rss) {
14822                 if (shared_rss->ind_tbl)
14823                         mlx5_free(shared_rss->ind_tbl);
14824                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14825                                 idx);
14826         }
14827         if (queue)
14828                 mlx5_free(queue);
14829         return 0;
14830 }
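
/*
 * Application-level sketch reaching this function through
 * rte_flow_action_handle_create() (hypothetical "port_id", "queues"
 * and "error"; per the code above, .types = 0 would fall back to
 * RTE_ETH_RSS_IP and .key = NULL to rss_hash_default_key):
 *
 *   uint16_t queues[] = { 0, 1, 2, 3 };
 *   struct rte_flow_action_rss rss = {
 *           .types = RTE_ETH_RSS_IP,
 *           .queue = queues,
 *           .queue_num = RTE_DIM(queues),
 *   };
 *   struct rte_flow_action act = {
 *           .type = RTE_FLOW_ACTION_TYPE_RSS,
 *           .conf = &rss,
 *   };
 *   struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *   struct rte_flow_action_handle *h =
 *           rte_flow_action_handle_create(port_id, &conf, &act, &error);
 */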
14831
14832 /**
14833  * Destroy the shared RSS action.
14834  * Release related hash RX queue objects.
14835  *
14836  * @param[in] dev
14837  *   Pointer to the Ethernet device structure.
14838  * @param[in] idx
14839  *   The shared RSS action object ID to be removed.
14840  * @param[out] error
14841  *   Perform verbose error reporting if not NULL. Initialized in case of
14842  *   error only.
14843  *
14844  * @return
14845  *   0 on success, otherwise negative errno value.
14846  */
14847 static int
14848 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14849                              struct rte_flow_error *error)
14850 {
14851         struct mlx5_priv *priv = dev->data->dev_private;
14852         struct mlx5_shared_action_rss *shared_rss =
14853             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14854         uint32_t old_refcnt = 1;
14855         int remaining;
14856         uint16_t *queue = NULL;
14857
14858         if (!shared_rss)
14859                 return rte_flow_error_set(error, EINVAL,
14860                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14861                                           "invalid shared action");
14862         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14863                                          0, 0, __ATOMIC_ACQUIRE,
14864                                          __ATOMIC_RELAXED))
14865                 return rte_flow_error_set(error, EBUSY,
14866                                           RTE_FLOW_ERROR_TYPE_ACTION,
14867                                           NULL,
14868                                           "shared rss has references");
14869         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14870         if (remaining)
14871                 return rte_flow_error_set(error, EBUSY,
14872                                           RTE_FLOW_ERROR_TYPE_ACTION,
14873                                           NULL,
14874                                           "shared rss hrxq has references");
14875         queue = shared_rss->ind_tbl->queues;
14876         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true,
14877                                                !!dev->data->dev_started);
14878         if (remaining)
14879                 return rte_flow_error_set(error, EBUSY,
14880                                           RTE_FLOW_ERROR_TYPE_ACTION,
14881                                           NULL,
14882                                           "shared rss indirection table has"
14883                                           " references");
14884         mlx5_free(queue);
14885         rte_spinlock_lock(&priv->shared_act_sl);
14886         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14887                      &priv->rss_shared_actions, idx, shared_rss, next);
14888         rte_spinlock_unlock(&priv->shared_act_sl);
14889         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14890                         idx);
14891         return 0;
14892 }
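
/*
 * Caller-side counterpart (sketch; "port_id", "handle" and "error" are
 * hypothetical): the EBUSY checks above surface through
 * rte_flow_action_handle_destroy() while any rule still references the
 * shared RSS action.
 *
 *   if (rte_flow_action_handle_destroy(port_id, handle, &error) != 0) {
 *           // likely EBUSY: some flow still uses the indirect action
 *   }
 */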
14893
14894 /**
14895  * Create indirect action, lock free
14896  * (mutex should be acquired by the caller).
14897  * Dispatcher for action type specific call.
14898  *
14899  * @param[in] dev
14900  *   Pointer to the Ethernet device structure.
14901  * @param[in] conf
14902  *   Shared action configuration.
14903  * @param[in] action
14904  *   Action specification used to create indirect action.
14905  * @param[out] error
14906  *   Perform verbose error reporting if not NULL. Initialized in case of
14907  *   error only.
14908  *
14909  * @return
14910  *   A valid shared action handle in case of success, NULL otherwise and
14911  *   rte_errno is set.
14912  */
14913 static struct rte_flow_action_handle *
14914 flow_dv_action_create(struct rte_eth_dev *dev,
14915                       const struct rte_flow_indir_action_conf *conf,
14916                       const struct rte_flow_action *action,
14917                       struct rte_flow_error *err)
14918 {
14919         struct mlx5_priv *priv = dev->data->dev_private;
14920         uint32_t age_idx = 0;
14921         uint32_t idx = 0;
14922         uint32_t ret = 0;
14923
14924         switch (action->type) {
14925         case RTE_FLOW_ACTION_TYPE_RSS:
14926                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14927                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14928                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14929                 break;
14930         case RTE_FLOW_ACTION_TYPE_AGE:
14931                 age_idx = flow_dv_aso_age_alloc(dev, err);
14932                 if (!age_idx) {
14933                         ret = -rte_errno;
14934                         break;
14935                 }
14936                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14937                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14938                 flow_dv_aso_age_params_init(dev, age_idx,
14939                                         ((const struct rte_flow_action_age *)
14940                                                 action->conf)->context ?
14941                                         ((const struct rte_flow_action_age *)
14942                                                 action->conf)->context :
14943                                         (void *)(uintptr_t)idx,
14944                                         ((const struct rte_flow_action_age *)
14945                                                 action->conf)->timeout);
14946                 ret = age_idx;
14947                 break;
14948         case RTE_FLOW_ACTION_TYPE_COUNT:
14949                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14950                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14951                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14952                 break;
14953         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14954                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14955                                                          err);
14956                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14957                 break;
14958         default:
14959                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14960                                    NULL, "action type not supported");
14961                 break;
14962         }
14963         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14964 }
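
/*
 * Sketch of the handle layout produced above (mirrors the decoding in
 * flow_dv_action_destroy() below): the action type occupies the bits
 * at and above MLX5_INDIRECT_ACTION_TYPE_OFFSET, the object index the
 * bits below it.
 *
 *   uint32_t act_idx = (uint32_t)(uintptr_t)handle;
 *   uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 *   uint32_t obj = act_idx &
 *                  ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
 */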
14965
14966 /**
14967  * Destroy the indirect action.
14968  * Release action-related resources on the NIC and in memory.
14969  * Lock free (mutex should be acquired by the caller).
14970  * Dispatcher for action type specific call.
14971  *
14972  * @param[in] dev
14973  *   Pointer to the Ethernet device structure.
14974  * @param[in] handle
14975  *   The indirect action object handle to be removed.
14976  * @param[out] error
14977  *   Perform verbose error reporting if not NULL. Initialized in case of
14978  *   error only.
14979  *
14980  * @return
14981  *   0 on success, otherwise negative errno value.
14982  */
14983 static int
14984 flow_dv_action_destroy(struct rte_eth_dev *dev,
14985                        struct rte_flow_action_handle *handle,
14986                        struct rte_flow_error *error)
14987 {
14988         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14989         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14990         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14991         struct mlx5_flow_counter *cnt;
14992         uint32_t no_flow_refcnt = 1;
14993         int ret;
14994
14995         switch (type) {
14996         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14997                 return __flow_dv_action_rss_release(dev, idx, error);
14998         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
14999                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
15000                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
15001                                                  &no_flow_refcnt, 1, false,
15002                                                  __ATOMIC_ACQUIRE,
15003                                                  __ATOMIC_RELAXED))
15004                         return rte_flow_error_set(error, EBUSY,
15005                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15006                                                   NULL,
15007                                                   "Indirect count action has references");
15008                 flow_dv_counter_free(dev, idx);
15009                 return 0;
15010         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15011                 ret = flow_dv_aso_age_release(dev, idx);
15012                 if (ret)
15013                         /*
15014                          * In this case, the last flow holding the
15015                          * reference will actually release the age action.
15016                          */
15017                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
15018                                 " released with references %d.", idx, ret);
15019                 return 0;
15020         case MLX5_INDIRECT_ACTION_TYPE_CT:
15021                 ret = flow_dv_aso_ct_release(dev, idx, error);
15022                 if (ret < 0)
15023                         return ret;
15024                 if (ret > 0)
15025                         DRV_LOG(DEBUG, "Connection tracking object %u still "
15026                                 "has references %d.", idx, ret);
15027                 return 0;
15028         default:
15029                 return rte_flow_error_set(error, ENOTSUP,
15030                                           RTE_FLOW_ERROR_TYPE_ACTION,
15031                                           NULL,
15032                                           "action type not supported");
15033         }
15034 }
15035
15036 /**
15037  * Update the shared RSS action configuration in place.
15038  *
15039  * @param[in] dev
15040  *   Pointer to the Ethernet device structure.
15041  * @param[in] idx
15042  *   The shared RSS action object ID to be updated.
15043  * @param[in] action_conf
15044  *   RSS action specification used to modify *shared_rss*.
15045  * @param[out] error
15046  *   Perform verbose error reporting if not NULL. Initialized in case of
15047  *   error only.
15048  *
15049  * @return
15050  *   0 on success, otherwise negative errno value.
15051  * @note Currently only update of the RSS queues is supported.
15052  */
15053 static int
15054 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
15055                             const struct rte_flow_action_rss *action_conf,
15056                             struct rte_flow_error *error)
15057 {
15058         struct mlx5_priv *priv = dev->data->dev_private;
15059         struct mlx5_shared_action_rss *shared_rss =
15060             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
15061         int ret = 0;
15062         void *queue = NULL;
15063         uint16_t *queue_old = NULL;
15064         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
15065         bool dev_started = !!dev->data->dev_started;
15066
15067         if (!shared_rss)
15068                 return rte_flow_error_set(error, EINVAL,
15069                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15070                                           "invalid shared action to update");
15071         if (priv->obj_ops.ind_table_modify == NULL)
15072                 return rte_flow_error_set(error, ENOTSUP,
15073                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15074                                           "cannot modify indirection table");
15075         queue = mlx5_malloc(MLX5_MEM_ZERO,
15076                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
15077                             0, SOCKET_ID_ANY);
15078         if (!queue)
15079                 return rte_flow_error_set(error, ENOMEM,
15080                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15081                                           NULL,
15082                                           "cannot allocate resource memory");
15083         memcpy(queue, action_conf->queue, queue_size);
15084         MLX5_ASSERT(shared_rss->ind_tbl);
15085         rte_spinlock_lock(&shared_rss->action_rss_sl);
15086         queue_old = shared_rss->ind_tbl->queues;
15087         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
15088                                         queue, action_conf->queue_num,
15089                                         true /* standalone */,
15090                                         dev_started /* ref_new_qs */,
15091                                         dev_started /* deref_old_qs */);
15092         if (ret) {
15093                 mlx5_free(queue);
15094                 ret = rte_flow_error_set(error, rte_errno,
15095                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15096                                           "cannot update indirection table");
15097         } else {
15098                 mlx5_free(queue_old);
15099                 shared_rss->origin.queue = queue;
15100                 shared_rss->origin.queue_num = action_conf->queue_num;
15101         }
15102         rte_spinlock_unlock(&shared_rss->action_rss_sl);
15103         return ret;
15104 }
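
/*
 * Usage sketch (illustrative only, not part of the driver): updating the
 * queue set of an indirect RSS action through the generic rte_flow API,
 * which lands in the handler above. The "example_" helper is hypothetical
 * and assumes "handle" was created on this port with an RSS configuration;
 * only the queue set is changed, per the note above.
 */
static __rte_unused int
example_rss_queues_update(uint16_t port_id,
                          struct rte_flow_action_handle *handle,
                          const uint16_t *queues, uint32_t queue_num)
{
        struct rte_flow_error error;
        const struct rte_flow_action_rss conf = {
                .queue = queues,
                .queue_num = queue_num,
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &conf,
        };

        return rte_flow_action_handle_update(port_id, handle, &action, &error);
}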
15105
15106 /**
15107  * Update the conntrack context or direction in place.
15108  * Context updates must be synchronized.
15109  *
15110  * @param[in] dev
15111  *   Pointer to the Ethernet device structure.
15112  * @param[in] idx
15113  *   The conntrack object ID to be updated.
15114  * @param[in] update
15115  *   Pointer to the structure of information to update.
15116  * @param[out] error
15117  *   Perform verbose error reporting if not NULL. Initialized in case of
15118  *   error only.
15119  *
15120  * @return
15121  *   0 on success, otherwise negative errno value.
15122  */
15123 static int
15124 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
15125                            const struct rte_flow_modify_conntrack *update,
15126                            struct rte_flow_error *error)
15127 {
15128         struct mlx5_priv *priv = dev->data->dev_private;
15129         struct mlx5_aso_ct_action *ct;
15130         const struct rte_flow_action_conntrack *new_prf;
15131         int ret = 0;
15132         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15133         uint32_t dev_idx;
15134
15135         if (PORT_ID(priv) != owner)
15136                 return rte_flow_error_set(error, EACCES,
15137                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15138                                           NULL,
15139                                           "CT object owned by another port");
15140         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15141         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15142         if (!ct->refcnt)
15143                 return rte_flow_error_set(error, ENOMEM,
15144                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15145                                           NULL,
15146                                           "CT object is inactive");
15147         new_prf = &update->new_ct;
15148         if (update->direction)
15149                 ct->is_original = !!new_prf->is_original_dir;
15150         if (update->state) {
15151                 /* Only validate the profile when it needs to be updated. */
15152                 ret = mlx5_validate_action_ct(dev, new_prf, error);
15153                 if (ret)
15154                         return ret;
15155                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
15156                 if (ret)
15157                         return rte_flow_error_set(error, EIO,
15158                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15159                                         NULL,
15160                                         "Failed to send CT context update WQE");
15161                 /* Block until ready or a failure. */
15162                 ret = mlx5_aso_ct_available(priv->sh, ct);
15163                 if (ret)
15164                         rte_flow_error_set(error, rte_errno,
15165                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15166                                            NULL,
15167                                            "Timed out waiting for the CT update");
15168         }
15169         return ret;
15170 }
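
/*
 * Usage sketch (illustrative only, not part of the driver): flipping the
 * tracked direction of an indirect conntrack action while leaving the
 * offloaded CT context untouched. The "example_" helper is hypothetical
 * and assumes "handle" wraps a CT object owned by port_id.
 */
static __rte_unused int
example_ct_direction_update(uint16_t port_id,
                            struct rte_flow_action_handle *handle)
{
        struct rte_flow_error error;
        const struct rte_flow_modify_conntrack update = {
                .new_ct = { .is_original_dir = 1 },
                .direction = 1, /* Update the direction bit only. */
                .state = 0,     /* Keep the offloaded CT context. */
        };

        return rte_flow_action_handle_update(port_id, handle, &update, &error);
}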
15171
15172 /**
15173  * Update a shared action configuration in place, lock free
15174  * (the mutex should be acquired by the caller).
15175  *
15176  * @param[in] dev
15177  *   Pointer to the Ethernet device structure.
15178  * @param[in] handle
15179  *   The indirect action object handle to be updated.
15180  * @param[in] update
15181  *   Action specification used to modify the action pointed to by *handle*.
15182  *   *update* may be of the same type as the action pointed to by the
15183  *   *handle* argument, or some other structure such as a wrapper,
15184  *   depending on the indirect action type.
15185  * @param[out] err
15186  *   Perform verbose error reporting if not NULL. Initialized in case of
15187  *   error only.
15188  *
15189  * @return
15190  *   0 on success, otherwise negative errno value.
15191  */
15192 static int
15193 flow_dv_action_update(struct rte_eth_dev *dev,
15194                         struct rte_flow_action_handle *handle,
15195                         const void *update,
15196                         struct rte_flow_error *err)
15197 {
15198         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15199         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15200         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15201         const void *action_conf;
15202
15203         switch (type) {
15204         case MLX5_INDIRECT_ACTION_TYPE_RSS:
15205                 action_conf = ((const struct rte_flow_action *)update)->conf;
15206                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
15207         case MLX5_INDIRECT_ACTION_TYPE_CT:
15208                 return __flow_dv_action_ct_update(dev, idx, update, err);
15209         default:
15210                 return rte_flow_error_set(err, ENOTSUP,
15211                                           RTE_FLOW_ERROR_TYPE_ACTION,
15212                                           NULL,
15213                                           "action type update not supported");
15214         }
15215 }
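
/*
 * Illustrative helpers (hypothetical, not used by the driver): on this
 * path an indirect action handle is a 32-bit scalar carrying the action
 * type in the bits from MLX5_INDIRECT_ACTION_TYPE_OFFSET upwards and the
 * object index in the bits below, exactly as decoded above.
 */
static __rte_unused uint32_t
example_indirect_action_type(const struct rte_flow_action_handle *handle)
{
        return (uint32_t)(uintptr_t)handle >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
}

static __rte_unused uint32_t
example_indirect_action_index(const struct rte_flow_action_handle *handle)
{
        return (uint32_t)(uintptr_t)handle &
               ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
}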
15216
15217 /**
15218  * Destroy the meter sub-policy table rules.
15219  * Lock free; the mutex should be acquired by the caller.
15220  *
15221  * @param[in] dev
15222  *   Pointer to Ethernet device.
15223  * @param[in] sub_policy
15224  *   Pointer to meter sub policy table.
15225  */
15226 static void
15227 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
15228                              struct mlx5_flow_meter_sub_policy *sub_policy)
15229 {
15230         struct mlx5_priv *priv = dev->data->dev_private;
15231         struct mlx5_flow_tbl_data_entry *tbl;
15232         struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
15233         struct mlx5_flow_meter_info *next_fm;
15234         struct mlx5_sub_policy_color_rule *color_rule;
15235         void *tmp;
15236         uint32_t i;
15237
15238         for (i = 0; i < RTE_COLORS; i++) {
15239                 next_fm = NULL;
15240                 if (i == RTE_COLOR_GREEN && policy &&
15241                     policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
15242                         next_fm = mlx5_flow_meter_find(priv,
15243                                         policy->act_cnt[i].next_mtr_id, NULL);
15244                 RTE_TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
15245                                    next_port, tmp) {
15246                         claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
15247                         tbl = container_of(color_rule->matcher->tbl,
15248                                            typeof(*tbl), tbl);
15249                         mlx5_list_unregister(tbl->matchers,
15250                                              &color_rule->matcher->entry);
15251                         TAILQ_REMOVE(&sub_policy->color_rules[i],
15252                                      color_rule, next_port);
15253                         mlx5_free(color_rule);
15254                         if (next_fm)
15255                                 mlx5_flow_meter_detach(priv, next_fm);
15256                 }
15257         }
15258         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15259                 if (sub_policy->rix_hrxq[i]) {
15260                         if (policy && !policy->is_hierarchy)
15261                                 mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
15262                         sub_policy->rix_hrxq[i] = 0;
15263                 }
15264                 if (sub_policy->jump_tbl[i]) {
15265                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15266                                                      sub_policy->jump_tbl[i]);
15267                         sub_policy->jump_tbl[i] = NULL;
15268                 }
15269         }
15270         if (sub_policy->tbl_rsc) {
15271                 flow_dv_tbl_resource_release(MLX5_SH(dev),
15272                                              sub_policy->tbl_rsc);
15273                 sub_policy->tbl_rsc = NULL;
15274         }
15275 }
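
/*
 * Illustrative pattern (hypothetical types, not used by the driver):
 * RTE_TAILQ_FOREACH_SAFE, as in the color-rule loop above, caches the
 * next element in "tmp" so the current entry can be unlinked and freed
 * while iterating.
 */
struct example_node {
        TAILQ_ENTRY(example_node) next;
};
TAILQ_HEAD(example_list, example_node);

static __rte_unused void
example_list_drain(struct example_list *list)
{
        struct example_node *node;
        struct example_node *tmp;

        RTE_TAILQ_FOREACH_SAFE(node, list, next, tmp) {
                TAILQ_REMOVE(list, node, next);
                mlx5_free(node);
        }
}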
15276
15277 /**
15278  * Destroy the policy rules for all meter domains, lock free
15279  * (the mutex should be acquired by the caller).
15280  * Iterates over all sub-policies and destroys their rules.
15281  *
15282  * @param[in] dev
15283  *   Pointer to the Ethernet device structure.
15284  * @param[in] mtr_policy
15285  *   Meter policy struct.
15286  */
15287 static void
15288 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
15289                              struct mlx5_flow_meter_policy *mtr_policy)
15290 {
15291         uint32_t i, j;
15292         struct mlx5_flow_meter_sub_policy *sub_policy;
15293         uint16_t sub_policy_num;
15294
15295         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15296                 sub_policy_num = (mtr_policy->sub_policy_num >>
15297                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15298                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15299                 for (j = 0; j < sub_policy_num; j++) {
15300                         sub_policy = mtr_policy->sub_policys[i][j];
15301                         if (sub_policy)
15302                                 __flow_dv_destroy_sub_policy_rules(dev,
15303                                                                    sub_policy);
15304                 }
15305         }
15306 }
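
/*
 * Illustrative helper (hypothetical, not used by the driver): the
 * per-domain sub-policy counts are packed into the single 32-bit
 * "sub_policy_num" word, MLX5_MTR_SUB_POLICY_NUM_SHIFT bits per domain,
 * and unpacked exactly as in the loop above.
 */
static __rte_unused uint16_t
example_sub_policy_num(uint32_t packed, enum mlx5_meter_domain domain)
{
        return (packed >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
               MLX5_MTR_SUB_POLICY_NUM_MASK;
}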
15307
15308 /**
15309  * Destroy the policy actions, lock free
15310  * (the mutex should be acquired by the caller).
15311  * Dispatches to the fate-action-specific release calls.
15312  *
15313  * @param[in] dev
15314  *   Pointer to the Ethernet device structure.
15315  * @param[in] mtr_policy
15316  *   Meter policy struct.
15317  */
15318 static void
15319 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
15320                       struct mlx5_flow_meter_policy *mtr_policy)
15321 {
15322         struct rte_flow_action *rss_action;
15323         struct mlx5_flow_handle dev_handle;
15324         uint32_t i, j;
15325
15326         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15327                 if (mtr_policy->act_cnt[i].rix_mark) {
15328                         flow_dv_tag_release(dev,
15329                                 mtr_policy->act_cnt[i].rix_mark);
15330                         mtr_policy->act_cnt[i].rix_mark = 0;
15331                 }
15332                 if (mtr_policy->act_cnt[i].modify_hdr) {
15333                         dev_handle.dvh.modify_hdr =
15334                                 mtr_policy->act_cnt[i].modify_hdr;
15335                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
15336                 }
15337                 switch (mtr_policy->act_cnt[i].fate_action) {
15338                 case MLX5_FLOW_FATE_SHARED_RSS:
15339                         rss_action = mtr_policy->act_cnt[i].rss;
15340                         mlx5_free(rss_action);
15341                         break;
15342                 case MLX5_FLOW_FATE_PORT_ID:
15343                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
15344                                 flow_dv_port_id_action_resource_release(dev,
15345                                 mtr_policy->act_cnt[i].rix_port_id_action);
15346                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
15347                         }
15348                         break;
15349                 case MLX5_FLOW_FATE_DROP:
15350                 case MLX5_FLOW_FATE_JUMP:
15351                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15352                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
15353                                                 NULL;
15354                         break;
15355                 default:
15356                         /* Queue action: nothing to release. */
15357                         break;
15358                 }
15359         }
15360         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15361                 mtr_policy->dr_drop_action[j] = NULL;
15362 }
15363
15364 /**
15365  * Create the policy actions for one meter domain, lock free
15366  * (the mutex should be acquired by the caller).
15367  * Dispatches to the action-type-specific handling.
15368  *
15369  * @param[in] dev
15370  *   Pointer to the Ethernet device structure.
15371  * @param[in] mtr_policy
15372  *   Meter policy struct.
15373  * @param[in] actions
15374  *   Per-color action specifications used to create the meter actions.
15375  * @param[in] domain
15376  *   Meter domain to create the policy actions for.
15377  * @param[out] error
15378  *   Perform verbose error reporting if not NULL. Initialized on error.
15379  * @return
15380  *   0 on success, otherwise negative errno value.
15381  */
15382 static int
15383 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15384                         struct mlx5_flow_meter_policy *mtr_policy,
15385                         const struct rte_flow_action *actions[RTE_COLORS],
15386                         enum mlx5_meter_domain domain,
15387                         struct rte_mtr_error *error)
15388 {
15389         struct mlx5_priv *priv = dev->data->dev_private;
15390         struct rte_flow_error flow_err;
15391         const struct rte_flow_action *act;
15392         uint64_t action_flags;
15393         struct mlx5_flow_handle dh;
15394         struct mlx5_flow dev_flow;
15395         struct mlx5_flow_dv_port_id_action_resource port_id_action;
15396         int i, ret;
15397         uint8_t egress, transfer;
15398         struct mlx5_meter_policy_action_container *act_cnt = NULL;
15399         union {
15400                 struct mlx5_flow_dv_modify_hdr_resource res;
15401                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15402                             sizeof(struct mlx5_modification_cmd) *
15403                             (MLX5_MAX_MODIFY_NUM + 1)];
15404         } mhdr_dummy;
15405         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15406
15407         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15408         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15409         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15410         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15411         memset(&port_id_action, 0,
15412                sizeof(struct mlx5_flow_dv_port_id_action_resource));
15413         memset(mhdr_res, 0, sizeof(*mhdr_res));
15414         mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15415                                        (egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15416                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX);
15417         dev_flow.handle = &dh;
15418         dev_flow.dv.port_id_action = &port_id_action;
15419         dev_flow.external = true;
15420         for (i = 0; i < RTE_COLORS; i++) {
15421                 if (i < MLX5_MTR_RTE_COLORS)
15422                         act_cnt = &mtr_policy->act_cnt[i];
15423                 /* Skip the color policy actions creation. */
15424                 if ((i == RTE_COLOR_YELLOW && mtr_policy->skip_y) ||
15425                     (i == RTE_COLOR_GREEN && mtr_policy->skip_g))
15426                         continue;
15427                 action_flags = 0;
15428                 for (act = actions[i];
15429                      act && act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
15430                         switch (act->type) {
15431                         case RTE_FLOW_ACTION_TYPE_MARK:
15432                         {
15433                                 uint32_t tag_be = mlx5_flow_mark_set
15434                                         (((const struct rte_flow_action_mark *)
15435                                         (act->conf))->id);
15436
15437                                 if (i >= MLX5_MTR_RTE_COLORS)
15438                                         return -rte_mtr_error_set(error,
15439                                           ENOTSUP,
15440                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15441                                           NULL,
15442                                           "cannot create policy "
15443                                           "mark action for this color");
15444                                 dev_flow.handle->mark = 1;
15445                                 if (flow_dv_tag_resource_register(dev, tag_be,
15446                                                   &dev_flow, &flow_err))
15447                                         return -rte_mtr_error_set(error,
15448                                         ENOTSUP,
15449                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15450                                         NULL,
15451                                         "cannot setup policy mark action");
15452                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
15453                                 act_cnt->rix_mark =
15454                                         dev_flow.handle->dvh.rix_tag;
15455                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15456                                 break;
15457                         }
15458                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15459                                 if (i >= MLX5_MTR_RTE_COLORS)
15460                                         return -rte_mtr_error_set(error,
15461                                           ENOTSUP,
15462                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15463                                           NULL,
15464                                           "cannot create policy "
15465                                           "set tag action for this color");
15466                                 if (flow_dv_convert_action_set_tag
15467                                 (dev, mhdr_res,
15468                                 (const struct rte_flow_action_set_tag *)
15469                                 act->conf,  &flow_err))
15470                                         return -rte_mtr_error_set(error,
15471                                         ENOTSUP,
15472                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15473                                         NULL, "cannot convert policy "
15474                                         "set tag action");
15475                                 if (!mhdr_res->actions_num)
15476                                         return -rte_mtr_error_set(error,
15477                                         ENOTSUP,
15478                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15479                                         NULL, "cannot find policy "
15480                                         "set tag action");
15481                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15482                                 break;
15483                         case RTE_FLOW_ACTION_TYPE_DROP:
15484                         {
15485                                 struct mlx5_flow_mtr_mng *mtrmng =
15486                                                 priv->sh->mtrmng;
15487                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15488
15489                                 /*
15490                                  * Create the drop table with
15491                                  * METER DROP level.
15492                                  */
15493                                 if (!mtrmng->drop_tbl[domain]) {
15494                                         mtrmng->drop_tbl[domain] =
15495                                         flow_dv_tbl_resource_get(dev,
15496                                         MLX5_FLOW_TABLE_LEVEL_METER,
15497                                         egress, transfer, false, NULL, 0,
15498                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15499                                         if (!mtrmng->drop_tbl[domain])
15500                                                 return -rte_mtr_error_set
15501                                         (error, ENOTSUP,
15502                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15503                                         NULL,
15504                                         "Failed to create meter drop table");
15505                                 }
15506                                 tbl_data = container_of
15507                                 (mtrmng->drop_tbl[domain],
15508                                 struct mlx5_flow_tbl_data_entry, tbl);
15509                                 if (i < MLX5_MTR_RTE_COLORS) {
15510                                         act_cnt->dr_jump_action[domain] =
15511                                                 tbl_data->jump.action;
15512                                         act_cnt->fate_action =
15513                                                 MLX5_FLOW_FATE_DROP;
15514                                 }
15515                                 if (i == RTE_COLOR_RED)
15516                                         mtr_policy->dr_drop_action[domain] =
15517                                                 tbl_data->jump.action;
15518                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15519                                 break;
15520                         }
15521                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15522                         {
15523                                 if (i >= MLX5_MTR_RTE_COLORS)
15524                                         return -rte_mtr_error_set(error,
15525                                         ENOTSUP,
15526                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15527                                         NULL, "cannot create policy "
15528                                         "fate queue for this color");
15529                                 act_cnt->queue =
15530                                 ((const struct rte_flow_action_queue *)
15531                                         (act->conf))->index;
15532                                 act_cnt->fate_action =
15533                                         MLX5_FLOW_FATE_QUEUE;
15534                                 dev_flow.handle->fate_action =
15535                                         MLX5_FLOW_FATE_QUEUE;
15536                                 mtr_policy->is_queue = 1;
15537                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15538                                 break;
15539                         }
15540                         case RTE_FLOW_ACTION_TYPE_RSS:
15541                         {
15542                                 int rss_size;
15543
15544                                 if (i >= MLX5_MTR_RTE_COLORS)
15545                                         return -rte_mtr_error_set(error,
15546                                           ENOTSUP,
15547                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15548                                           NULL,
15549                                           "cannot create policy "
15550                                           "rss action for this color");
15551                                 /*
15552                                  * Save the RSS conf into the policy struct
15553                                  * for the translation stage.
15554                                  */
15555                                 rss_size = (int)rte_flow_conv
15556                                         (RTE_FLOW_CONV_OP_ACTION,
15557                                         NULL, 0, act, &flow_err);
15558                                 if (rss_size <= 0)
15559                                         return -rte_mtr_error_set(error,
15560                                           ENOTSUP,
15561                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15562                                           NULL, "Failed to get the "
15563                                           "RSS action struct size");
15564                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
15565                                                 rss_size, 0, SOCKET_ID_ANY);
15566                                 if (!act_cnt->rss)
15567                                         return -rte_mtr_error_set(error,
15568                                           ENOTSUP,
15569                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15570                                           NULL,
15571                                           "Failed to allocate RSS action memory");
15572                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
15573                                         act_cnt->rss, rss_size,
15574                                         act, &flow_err);
15575                                 if (ret < 0)
15576                                         return -rte_mtr_error_set(error,
15577                                           ENOTSUP,
15578                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15579                                           NULL, "Failed to save the "
15580                                           "RSS action into the policy struct");
15581                                 act_cnt->fate_action =
15582                                         MLX5_FLOW_FATE_SHARED_RSS;
15583                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15584                                 break;
15585                         }
15586                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15587                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
15588                         {
15589                                 struct mlx5_flow_dv_port_id_action_resource
15590                                         port_id_resource;
15591                                 uint32_t port_id = 0;
15592
15593                                 if (i >= MLX5_MTR_RTE_COLORS)
15594                                         return -rte_mtr_error_set(error,
15595                                         ENOTSUP,
15596                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15597                                         NULL, "cannot create policy "
15598                                         "port action for this color");
15599                                 memset(&port_id_resource, 0,
15600                                         sizeof(port_id_resource));
15601                                 if (flow_dv_translate_action_port_id(dev, act,
15602                                                 &port_id, &flow_err))
15603                                         return -rte_mtr_error_set(error,
15604                                         ENOTSUP,
15605                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15606                                         NULL, "cannot translate "
15607                                         "policy port action");
15608                                 port_id_resource.port_id = port_id;
15609                                 if (flow_dv_port_id_action_resource_register
15610                                         (dev, &port_id_resource,
15611                                         &dev_flow, &flow_err))
15612                                         return -rte_mtr_error_set(error,
15613                                         ENOTSUP,
15614                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15615                                         NULL, "cannot setup "
15616                                         "policy port action");
15617                                 act_cnt->rix_port_id_action =
15618                                         dev_flow.handle->rix_port_id_action;
15619                                 act_cnt->fate_action =
15620                                         MLX5_FLOW_FATE_PORT_ID;
15621                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15622                                 break;
15623                         }
15624                         case RTE_FLOW_ACTION_TYPE_JUMP:
15625                         {
15626                                 uint32_t jump_group = 0;
15627                                 uint32_t table = 0;
15628                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15629                                 struct flow_grp_info grp_info = {
15630                                         .external = !!dev_flow.external,
15631                                         .transfer = !!transfer,
15632                                         .fdb_def_rule = !!priv->fdb_def_rule,
15633                                         .std_tbl_fix = 0,
15634                                         .skip_scale = dev_flow.skip_scale &
15635                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
15636                                 };
15637                                 struct mlx5_flow_meter_sub_policy *sub_policy =
15638                                         mtr_policy->sub_policys[domain][0];
15639
15640                                 if (i >= MLX5_MTR_RTE_COLORS)
15641                                         return -rte_mtr_error_set(error,
15642                                           ENOTSUP,
15643                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15644                                           NULL,
15645                                           "cannot create policy "
15646                                           "jump action for this color");
15647                                 jump_group =
15648                                 ((const struct rte_flow_action_jump *)
15649                                                         act->conf)->group;
15650                                 if (mlx5_flow_group_to_table(dev, NULL,
15651                                                        jump_group,
15652                                                        &table,
15653                                                        &grp_info, &flow_err))
15654                                         return -rte_mtr_error_set(error,
15655                                         ENOTSUP,
15656                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15657                                         NULL, "cannot setup "
15658                                         "policy jump action");
15659                                 sub_policy->jump_tbl[i] =
15660                                 flow_dv_tbl_resource_get(dev,
15661                                         table, egress,
15662                                         transfer,
15663                                         !!dev_flow.external,
15664                                         NULL, jump_group, 0,
15665                                         0, &flow_err);
15666                                 if (!sub_policy->jump_tbl[i])
15667                                         return -rte_mtr_error_set
15668                                         (error,
15669                                         ENOTSUP,
15670                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15671                                         NULL, "cannot create jump action.");
15672                                 tbl_data = container_of
15673                                 (sub_policy->jump_tbl[i],
15674                                 struct mlx5_flow_tbl_data_entry, tbl);
15675                                 act_cnt->dr_jump_action[domain] =
15676                                         tbl_data->jump.action;
15677                                 act_cnt->fate_action =
15678                                         MLX5_FLOW_FATE_JUMP;
15679                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15680                                 break;
15681                         }
15682                         /*
15683                          * No need to check meter hierarchy for Y or R colors
15684                          * here since it is done in the validation stage.
15685                          */
15686                         case RTE_FLOW_ACTION_TYPE_METER:
15687                         {
15688                                 const struct rte_flow_action_meter *mtr;
15689                                 struct mlx5_flow_meter_info *next_fm;
15690                                 struct mlx5_flow_meter_policy *next_policy;
15691                                 struct rte_flow_action tag_action;
15692                                 struct mlx5_rte_flow_action_set_tag set_tag;
15693                                 uint32_t next_mtr_idx = 0;
15694
15695                                 mtr = act->conf;
15696                                 next_fm = mlx5_flow_meter_find(priv,
15697                                                         mtr->mtr_id,
15698                                                         &next_mtr_idx);
15699                                 if (!next_fm)
15700                                         return -rte_mtr_error_set(error, EINVAL,
15701                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15702                                                 "Failed to find the next meter.");
15703                                 if (next_fm->def_policy)
15704                                         return -rte_mtr_error_set(error, EINVAL,
15705                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15706                                 "Hierarchy supports termination meters only.");
15707                                 next_policy = mlx5_flow_meter_policy_find(dev,
15708                                                 next_fm->policy_id, NULL);
15709                                 MLX5_ASSERT(next_policy);
15710                                 if (next_fm->drop_cnt) {
15711                                         set_tag.id =
15712                                                 (enum modify_reg)
15713                                                 mlx5_flow_get_reg_id(dev,
15714                                                 MLX5_MTR_ID,
15715                                                 0,
15716                                                 (struct rte_flow_error *)error);
15717                                         set_tag.offset = (priv->mtr_reg_share ?
15718                                                 MLX5_MTR_COLOR_BITS : 0);
15719                                         set_tag.length = (priv->mtr_reg_share ?
15720                                                MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
15721                                                MLX5_REG_BITS);
15722                                         set_tag.data = next_mtr_idx;
15723                                         tag_action.type =
15724                                                 (enum rte_flow_action_type)
15725                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
15726                                         tag_action.conf = &set_tag;
15727                                         if (flow_dv_convert_action_set_reg
15728                                                 (mhdr_res, &tag_action,
15729                                                 (struct rte_flow_error *)error))
15730                                                 return -rte_errno;
15731                                         action_flags |=
15732                                                 MLX5_FLOW_ACTION_SET_TAG;
15733                                 }
15734                                 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
15735                                 act_cnt->next_mtr_id = next_fm->meter_id;
15736                                 act_cnt->next_sub_policy = NULL;
15737                                 mtr_policy->is_hierarchy = 1;
15738                                 mtr_policy->dev = next_policy->dev;
15739                                 action_flags |=
15740                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
15741                                 break;
15742                         }
15743                         default:
15744                                 return -rte_mtr_error_set(error, ENOTSUP,
15745                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15746                                           NULL, "action type not supported");
15747                         }
15748                         if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
15749                                 /* Create the modify action if needed. */
15750                                 dev_flow.dv.group = 1;
15751                                 if (flow_dv_modify_hdr_resource_register
15752                                         (dev, mhdr_res, &dev_flow, &flow_err))
15753                                         return -rte_mtr_error_set(error,
15754                                                 ENOTSUP,
15755                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15756                                                 NULL, "cannot register policy "
15757                                                 "set tag action");
15758                                 act_cnt->modify_hdr =
15759                                         dev_flow.handle->dvh.modify_hdr;
15760                         }
15761                 }
15762         }
15763         return 0;
15764 }
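
/*
 * Usage sketch (illustrative only, not part of the driver): a meter
 * policy whose per-color action lists are consumed by the dispatcher
 * above. Green packets go to queue 0, red packets are dropped, yellow
 * is left unspecified; "policy_id" is an arbitrary application choice.
 */
static __rte_unused int
example_meter_policy_add(uint16_t port_id, uint32_t policy_id)
{
        struct rte_mtr_error error;
        const struct rte_flow_action_queue green_queue = { .index = 0 };
        const struct rte_flow_action green_actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &green_queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        const struct rte_flow_action red_actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_mtr_meter_policy_params params = {
                .actions[RTE_COLOR_GREEN] = green_actions,
                .actions[RTE_COLOR_YELLOW] = NULL,
                .actions[RTE_COLOR_RED] = red_actions,
        };

        return rte_mtr_meter_policy_add(port_id, policy_id, &params, &error);
}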
15765
15766 /**
15767  * Create the policy actions for all configured meter domains, lock free
15768  * (the mutex should be acquired by the caller).
15769  * Dispatches to the per-domain creation routine.
15770  *
15771  * @param[in] dev
15772  *   Pointer to the Ethernet device structure.
15773  * @param[in] mtr_policy
15774  *   Meter policy struct.
15775  * @param[in] actions
15776  *   Per-color action specifications used to create the meter actions.
15777  * @param[out] error
15778  *   Perform verbose error reporting if not NULL. Initialized in case of
15779  *   error only.
15780  *
15781  * @return
15782  *   0 on success, otherwise negative errno value.
15783  */
15784 static int
15785 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15786                       struct mlx5_flow_meter_policy *mtr_policy,
15787                       const struct rte_flow_action *actions[RTE_COLORS],
15788                       struct rte_mtr_error *error)
15789 {
15790         int ret, i;
15791         uint16_t sub_policy_num;
15792
15793         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15794                 sub_policy_num = (mtr_policy->sub_policy_num >>
15795                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15796                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15797                 if (sub_policy_num) {
15798                         ret = __flow_dv_create_domain_policy_acts(dev,
15799                                 mtr_policy, actions,
15800                                 (enum mlx5_meter_domain)i, error);
15801                         /* Resource cleanup is done at the caller level. */
15802                         if (ret)
15803                                 return ret;
15804                 }
15805         }
15806         return 0;
15807 }
15808
15809 /**
15810  * Query a DV flow rule for its statistics via DevX.
15811  *
15812  * @param[in] dev
15813  *   Pointer to Ethernet device.
15814  * @param[in] cnt_idx
15815  *   Index to the flow counter.
15816  * @param[out] data
15817  *   Data retrieved by the query.
15818  * @param[out] error
15819  *   Perform verbose error reporting if not NULL.
15820  *
15821  * @return
15822  *   0 on success, a negative errno value otherwise and rte_errno is set.
15823  */
15824 int
15825 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15826                     struct rte_flow_error *error)
15827 {
15828         struct mlx5_priv *priv = dev->data->dev_private;
15829         struct rte_flow_query_count *qc = data;
15830
15831         if (!priv->sh->devx)
15832                 return rte_flow_error_set(error, ENOTSUP,
15833                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15834                                           NULL,
15835                                           "counters are not supported");
15836         if (cnt_idx) {
15837                 uint64_t pkts, bytes;
15838                 struct mlx5_flow_counter *cnt;
15839                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15840
15841                 if (err)
15842                         return rte_flow_error_set(error, -err,
15843                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15844                                         NULL, "cannot read counters");
15845                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15846                 qc->hits_set = 1;
15847                 qc->bytes_set = 1;
15848                 qc->hits = pkts - cnt->hits;
15849                 qc->bytes = bytes - cnt->bytes;
15850                 if (qc->reset) {
15851                         cnt->hits = pkts;
15852                         cnt->bytes = bytes;
15853                 }
15854                 return 0;
15855         }
15856         return rte_flow_error_set(error, EINVAL,
15857                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15858                                   NULL,
15859                                   "counters are not available");
15860 }
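
/*
 * Usage sketch (illustrative only, not part of the driver): reading and
 * resetting the counter of a flow rule through rte_flow_query(), which
 * is served by flow_dv_query_count() above. The "example_" helper is
 * hypothetical and assumes "flow" carries a COUNT action on this port.
 */
static __rte_unused int
example_flow_counter_read(uint16_t port_id, struct rte_flow *flow,
                          uint64_t *hits, uint64_t *bytes)
{
        struct rte_flow_error error;
        struct rte_flow_query_count query = { .reset = 1 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        int ret = rte_flow_query(port_id, flow, &action, &query, &error);

        if (ret == 0) {
                *hits = query.hits_set ? query.hits : 0;
                *bytes = query.bytes_set ? query.bytes : 0;
        }
        return ret;
}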
15861
15862
15863 /**
15864  * Query counter's action pointer for a DV flow rule via DevX.
15865  *
15866  * @param[in] dev
15867  *   Pointer to Ethernet device.
15868  * @param[in] cnt_idx
15869  *   Index to the flow counter.
15870  * @param[out] action_ptr
15871  *   Action pointer for counter.
15872  * @param[out] error
15873  *   Perform verbose error reporting if not NULL.
15874  *
15875  * @return
15876  *   0 on success, a negative errno value otherwise and rte_errno is set.
15877  */
15878 int
15879 flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,
15880         void **action_ptr, struct rte_flow_error *error)
15881 {
15882         struct mlx5_priv *priv = dev->data->dev_private;
15883
15884         if (!priv->sh->devx || !action_ptr)
15885                 return rte_flow_error_set(error, ENOTSUP,
15886                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15887                                           NULL,
15888                                           "counters are not supported");
15889
15890         if (cnt_idx) {
15891                 struct mlx5_flow_counter *cnt =
15892                         flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15893                 if (cnt) {
15894                         *action_ptr = cnt->action;
15895                         return 0;
15896                 }
15897         }
15898         return rte_flow_error_set(error, EINVAL,
15899                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15900                                   NULL,
15901                                   "counters are not available");
15902 }
15903
15904 static int
15905 flow_dv_action_query(struct rte_eth_dev *dev,
15906                      const struct rte_flow_action_handle *handle, void *data,
15907                      struct rte_flow_error *error)
15908 {
15909         struct mlx5_age_param *age_param;
15910         struct rte_flow_query_age *resp;
15911         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15912         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15913         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15914         struct mlx5_priv *priv = dev->data->dev_private;
15915         struct mlx5_aso_ct_action *ct;
15916         uint16_t owner;
15917         uint32_t dev_idx;
15918
15919         switch (type) {
15920         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15921                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
15922                 resp = data;
15923                 resp->aged = __atomic_load_n(&age_param->state,
15924                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
15925                                                                           1 : 0;
15926                 resp->sec_since_last_hit_valid = !resp->aged;
15927                 if (resp->sec_since_last_hit_valid)
15928                         resp->sec_since_last_hit = __atomic_load_n
15929                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15930                 return 0;
15931         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15932                 return flow_dv_query_count(dev, idx, data, error);
15933         case MLX5_INDIRECT_ACTION_TYPE_CT:
15934                 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15935                 if (owner != PORT_ID(priv))
15936                         return rte_flow_error_set(error, EACCES,
15937                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15938                                         NULL,
15939                                         "CT object owned by another port");
15940                 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15941                 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15942                 MLX5_ASSERT(ct);
15943                 if (!ct->refcnt)
15944                         return rte_flow_error_set(error, EFAULT,
15945                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15946                                         NULL,
15947                                         "CT object is inactive");
15948                 ((struct rte_flow_action_conntrack *)data)->peer_port =
15949                                                         ct->peer;
15950                 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
15951                                                         ct->is_original;
15952                 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
15953                         return rte_flow_error_set(error, EIO,
15954                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15955                                         NULL,
15956                                         "Failed to query CT context");
15957                 return 0;
15958         default:
15959                 return rte_flow_error_set(error, ENOTSUP,
15960                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15961                                           "action type query not supported");
15962         }
15963 }
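
/*
 * Usage sketch (illustrative only, not part of the driver): polling an
 * indirect AGE action, served by the MLX5_INDIRECT_ACTION_TYPE_AGE
 * branch above. The hypothetical helper returns 1 once the action aged
 * out, 0 while it is still active and a negative value on error.
 */
static __rte_unused int
example_age_poll(uint16_t port_id,
                 const struct rte_flow_action_handle *handle)
{
        struct rte_flow_error error;
        struct rte_flow_query_age resp;

        if (rte_flow_action_handle_query(port_id, handle, &resp, &error))
                return -1;
        return resp.aged ? 1 : 0;
}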
15964
15965 /**
15966  * Query a flow rule AGE action for aging information.
15967  *
15968  * @param[in] dev
15969  *   Pointer to Ethernet device.
15970  * @param[in] flow
15971  *   Pointer to the sub flow.
15972  * @param[out] data
15973  *   Data retrieved by the query.
15974  * @param[out] error
15975  *   Perform verbose error reporting if not NULL.
15976  *
15977  * @return
15978  *   0 on success, a negative errno value otherwise and rte_errno is set.
15979  */
15980 static int
15981 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15982                   void *data, struct rte_flow_error *error)
15983 {
15984         struct rte_flow_query_age *resp = data;
15985         struct mlx5_age_param *age_param;
15986
15987         if (flow->age) {
15988                 struct mlx5_aso_age_action *act =
15989                                      flow_aso_age_get_by_idx(dev, flow->age);
15990
15991                 age_param = &act->age_params;
15992         } else if (flow->counter) {
15993                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15994
15995                 if (!age_param || !age_param->timeout)
15996                         return rte_flow_error_set
15997                                         (error, EINVAL,
15998                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15999                                          NULL, "cannot read age data");
16000         } else {
16001                 return rte_flow_error_set(error, EINVAL,
16002                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16003                                           NULL, "age data not available");
16004         }
16005         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
16006                                      AGE_TMOUT ? 1 : 0;
16007         resp->sec_since_last_hit_valid = !resp->aged;
16008         if (resp->sec_since_last_hit_valid)
16009                 resp->sec_since_last_hit = __atomic_load_n
16010                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
16011         return 0;
16012 }
16013
16014 /**
16015  * Query a flow.
16016  *
16017  * @see rte_flow_query()
16018  * @see rte_flow_ops
16019  */
16020 static int
16021 flow_dv_query(struct rte_eth_dev *dev,
16022               struct rte_flow *flow,
16023               const struct rte_flow_action *actions,
16024               void *data,
16025               struct rte_flow_error *error)
16026 {
16027         int ret = -EINVAL;
16028
16029         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
16030                 switch (actions->type) {
16031                 case RTE_FLOW_ACTION_TYPE_VOID:
16032                         break;
16033                 case RTE_FLOW_ACTION_TYPE_COUNT:
16034                         ret = flow_dv_query_count(dev, flow->counter, data,
16035                                                   error);
16036                         break;
16037                 case RTE_FLOW_ACTION_TYPE_AGE:
16038                         ret = flow_dv_query_age(dev, flow, data, error);
16039                         break;
16040                 default:
16041                         return rte_flow_error_set(error, ENOTSUP,
16042                                                   RTE_FLOW_ERROR_TYPE_ACTION,
16043                                                   actions,
16044                                                   "action not supported");
16045                 }
16046         }
16047         return ret;
16048 }
16049
16050 /**
16051  * Destroy the meter table set.
16052  * Lock free; the mutex should be acquired by the caller.
16053  *
16054  * @param[in] dev
16055  *   Pointer to Ethernet device.
16056  * @param[in] fm
16057  *   Meter information table.
16058  */
16059 static void
16060 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
16061                         struct mlx5_flow_meter_info *fm)
16062 {
16063         struct mlx5_priv *priv = dev->data->dev_private;
16064         int i;
16065
16066         if (!fm || !priv->config.dv_flow_en)
16067                 return;
16068         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16069                 if (fm->drop_rule[i]) {
16070                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
16071                         fm->drop_rule[i] = NULL;
16072                 }
16073         }
16074 }
16075
16076 static void
16077 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
16078 {
16079         struct mlx5_priv *priv = dev->data->dev_private;
16080         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16081         struct mlx5_flow_tbl_data_entry *tbl;
16082         int i, j;
16083
16084         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16085                 if (mtrmng->def_rule[i]) {
16086                         claim_zero(mlx5_flow_os_destroy_flow
16087                                         (mtrmng->def_rule[i]));
16088                         mtrmng->def_rule[i] = NULL;
16089                 }
16090                 if (mtrmng->def_matcher[i]) {
16091                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
16092                                 struct mlx5_flow_tbl_data_entry, tbl);
16093                         mlx5_list_unregister(tbl->matchers,
16094                                              &mtrmng->def_matcher[i]->entry);
16095                         mtrmng->def_matcher[i] = NULL;
16096                 }
16097                 for (j = 0; j < MLX5_REG_BITS; j++) {
16098                         if (mtrmng->drop_matcher[i][j]) {
16099                                 tbl =
16100                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
16101                                              struct mlx5_flow_tbl_data_entry,
16102                                              tbl);
16103                                 mlx5_list_unregister(tbl->matchers,
16104                                             &mtrmng->drop_matcher[i][j]->entry);
16105                                 mtrmng->drop_matcher[i][j] = NULL;
16106                         }
16107                 }
16108                 if (mtrmng->drop_tbl[i]) {
16109                         flow_dv_tbl_resource_release(MLX5_SH(dev),
16110                                 mtrmng->drop_tbl[i]);
16111                         mtrmng->drop_tbl[i] = NULL;
16112                 }
16113         }
16114 }
16115
16116 /* Number of meter flow actions: count and jump, or count and drop. */
16117 #define METER_ACTIONS 2
16118
16119 static void
16120 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
16121                                     enum mlx5_meter_domain domain)
16122 {
16123         struct mlx5_priv *priv = dev->data->dev_private;
16124         struct mlx5_flow_meter_def_policy *def_policy =
16125                         priv->sh->mtrmng->def_policy[domain];
16126
16127         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
16128         mlx5_free(def_policy);
16129         priv->sh->mtrmng->def_policy[domain] = NULL;
16130 }
16131
16132 /**
16133  * Destroy the default policy table set.
16134  *
16135  * @param[in] dev
16136  *   Pointer to Ethernet device.
16137  */
16138 static void
16139 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
16140 {
16141         struct mlx5_priv *priv = dev->data->dev_private;
16142         int i;
16143
16144         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
16145                 if (priv->sh->mtrmng->def_policy[i])
16146                         __flow_dv_destroy_domain_def_policy(dev,
16147                                         (enum mlx5_meter_domain)i);
16148         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
16149 }
16150
16151 static int
16152 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
16153                         uint32_t color_reg_c_idx,
16154                         enum rte_color color, void *matcher_object,
16155                         int actions_n, void *actions,
16156                         bool match_src_port, const struct rte_flow_item *item,
16157                         void **rule, const struct rte_flow_attr *attr)
16158 {
16159         int ret;
16160         struct mlx5_flow_dv_match_params value = {
16161                 .size = sizeof(value.buf),
16162         };
16163         struct mlx5_flow_dv_match_params matcher = {
16164                 .size = sizeof(matcher.buf),
16165         };
16166         struct mlx5_priv *priv = dev->data->dev_private;
16167         uint8_t misc_mask;
16168
16169         if (match_src_port && (priv->representor || priv->master)) {
16170                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
16171                                                    value.buf, item, attr)) {
16172                         DRV_LOG(ERR, "Failed to create meter policy%d flow"
16173                                 " match value with port.", color);
16174                         return -1;
16175                 }
16176         }
16177         flow_dv_match_meta_reg(matcher.buf, value.buf,
16178                                (enum modify_reg)color_reg_c_idx,
16179                                rte_col_2_mlx5_col(color), UINT32_MAX);
16180         misc_mask = flow_dv_matcher_enable(value.buf);
16181         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16182         ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value,
16183                                        actions_n, actions, rule);
16184         if (ret) {
16185                 DRV_LOG(ERR, "Failed to create meter policy%d flow.", color);
16186                 return -1;
16187         }
16188         return 0;
16189 }
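
/*
 * Hedged sketch (illustrative, not called by the driver): a policy flow
 * matches the color register against a single color value with a full
 * mask, mirroring the call in __flow_dv_create_policy_flow() above.
 * The helper name and parameters are illustrative only.
 */
static __rte_unused void
mtr_color_match_sketch(void *matcher_buf, void *key_buf,
		       uint32_t color_reg_c_idx, enum rte_color color)
{
	/* Match: color register == device encoding of 'color'. */
	flow_dv_match_meta_reg(matcher_buf, key_buf,
			       (enum modify_reg)color_reg_c_idx,
			       rte_col_2_mlx5_col(color), UINT32_MAX);
}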
16190
16191 static int
16192 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
16193                         uint32_t color_reg_c_idx,
16194                         uint16_t priority,
16195                         struct mlx5_flow_meter_sub_policy *sub_policy,
16196                         const struct rte_flow_attr *attr,
16197                         bool match_src_port,
16198                         const struct rte_flow_item *item,
16199                         struct mlx5_flow_dv_matcher **policy_matcher,
16200                         struct rte_flow_error *error)
16201 {
16202         struct mlx5_list_entry *entry;
16203         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
16204         struct mlx5_flow_dv_matcher matcher = {
16205                 .mask = {
16206                         .size = sizeof(matcher.mask.buf),
16207                 },
16208                 .tbl = tbl_rsc,
16209         };
16210         struct mlx5_flow_dv_match_params value = {
16211                 .size = sizeof(value.buf),
16212         };
16213         struct mlx5_flow_cb_ctx ctx = {
16214                 .error = error,
16215                 .data = &matcher,
16216         };
16217         struct mlx5_flow_tbl_data_entry *tbl_data;
16218         struct mlx5_priv *priv = dev->data->dev_private;
16219         const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
16220
16221         if (match_src_port && (priv->representor || priv->master)) {
16222                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
16223                                                    value.buf, item, attr)) {
16224                         DRV_LOG(ERR, "Failed to register meter policy matcher"
16225                                 " priority %d with port.", priority);
16226                         return -1;
16227                 }
16228         }
16229         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
16230         if (priority < RTE_COLOR_RED)
16231                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16232                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
16233         matcher.priority = priority;
16234         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
16235                                     matcher.mask.size);
16236         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16237         if (!entry) {
16238                 DRV_LOG(ERR, "Failed to register meter drop matcher.");
16239                 return -1;
16240         }
16241         *policy_matcher =
16242                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
16243         return 0;
16244 }
16245
16246 /**
16247  * Create the policy rules per domain.
16248  *
16249  * @param[in] dev
16250  *   Pointer to Ethernet device.
16251  * @param[in] sub_policy
16252  *   Pointer to sub policy table.
16253  * @param[in] egress
16254  *   Direction of the table.
16255  * @param[in] transfer
16256  *   E-Switch or NIC flow.
 * @param[in] match_src_port
 *   Whether to match on the source port.
16257  * @param[in] acts
16258  *   Pointer to policy action list per color.
16259  *
16260  * @return
16261  *   0 on success, -1 otherwise.
16262  */
16263 static int
16264 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
16265                 struct mlx5_flow_meter_sub_policy *sub_policy,
16266                 uint8_t egress, uint8_t transfer, bool match_src_port,
16267                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
16268 {
16269         struct mlx5_priv *priv = dev->data->dev_private;
16270         struct rte_flow_error flow_err;
16271         uint32_t color_reg_c_idx;
16272         struct rte_flow_attr attr = {
16273                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16274                 .priority = 0,
16275                 .ingress = 0,
16276                 .egress = !!egress,
16277                 .transfer = !!transfer,
16278                 .reserved = 0,
16279         };
16280         int i;
16281         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
16282         struct mlx5_sub_policy_color_rule *color_rule;
16283         bool svport_match;
16284         struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL};
16285
16286         if (ret < 0)
16287                 return -1;
16288         /* Create policy table with POLICY level. */
16289         if (!sub_policy->tbl_rsc)
16290                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
16291                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
16292                                 egress, transfer, false, NULL, 0, 0,
16293                                 sub_policy->idx, &flow_err);
16294         if (!sub_policy->tbl_rsc) {
16295                 DRV_LOG(ERR,
16296                         "Failed to create meter sub policy table.");
16297                 return -1;
16298         }
16299         /* Prepare matchers. */
16300         color_reg_c_idx = ret;
16301         for (i = 0; i < RTE_COLORS; i++) {
16302                 TAILQ_INIT(&sub_policy->color_rules[i]);
16303                 if (!acts[i].actions_n)
16304                         continue;
16305                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16306                                 sizeof(struct mlx5_sub_policy_color_rule),
16307                                 0, SOCKET_ID_ANY);
16308                 if (!color_rule) {
16309                         DRV_LOG(ERR, "No memory to create color rule.");
16310                         goto err_exit;
16311                 }
16312                 tmp_rules[i] = color_rule;
16313                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16314                                   color_rule, next_port);
16315                 color_rule->src_port = priv->representor_id;
16316                 /* attr.priority is not used by the policy matcher. */
16317                 attr.priority = i;
16318                 /* Create matchers for colors. */
16319                 svport_match = (i != RTE_COLOR_RED) ? match_src_port : false;
16320                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16321                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
16322                                 &attr, svport_match, NULL,
16323                                 &color_rule->matcher, &flow_err)) {
16324                         DRV_LOG(ERR, "Failed to create color%u matcher.", i);
16325                         goto err_exit;
16326                 }
16327                 /* Create flow, matching color. */
16328                 if (__flow_dv_create_policy_flow(dev,
16329                                 color_reg_c_idx, (enum rte_color)i,
16330                                 color_rule->matcher->matcher_object,
16331                                 acts[i].actions_n, acts[i].dv_actions,
16332                                 svport_match, NULL, &color_rule->rule,
16333                                 &attr)) {
16334                         DRV_LOG(ERR, "Failed to create color%u rule.", i);
16335                         goto err_exit;
16336                 }
16337         }
16338         return 0;
16339 err_exit:
16340         /* All the policy rules will be cleared. */
16341         do {
16342                 color_rule = tmp_rules[i];
16343                 if (color_rule) {
16344                         if (color_rule->rule)
16345                                 mlx5_flow_os_destroy_flow(color_rule->rule);
16346                         if (color_rule->matcher) {
16347                                 struct mlx5_flow_tbl_data_entry *tbl =
16348                                         container_of(color_rule->matcher->tbl,
16349                                                      typeof(*tbl), tbl);
16350                                 mlx5_list_unregister(tbl->matchers,
16351                                                 &color_rule->matcher->entry);
16352                         }
16353                         TAILQ_REMOVE(&sub_policy->color_rules[i],
16354                                      color_rule, next_port);
16355                         mlx5_free(color_rule);
16356                 }
16357         } while (i--);
16358         return -1;
16359 }
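
/*
 * Hedged sketch of the unwind pattern used by
 * __flow_dv_create_domain_policy_rules() above: on failure at index i,
 * release entries i..0 in reverse creation order. Names are
 * illustrative only.
 */
static __rte_unused void
mtr_unwind_sketch(void *objs[], int i, void (*release)(void *obj))
{
	do {
		if (objs[i])
			release(objs[i]);
	} while (i--);
}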
16360
16361 static int
16362 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
16363                         struct mlx5_flow_meter_policy *mtr_policy,
16364                         struct mlx5_flow_meter_sub_policy *sub_policy,
16365                         uint32_t domain)
16366 {
16367         struct mlx5_priv *priv = dev->data->dev_private;
16368         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16369         struct mlx5_flow_dv_tag_resource *tag;
16370         struct mlx5_flow_dv_port_id_action_resource *port_action;
16371         struct mlx5_hrxq *hrxq;
16372         struct mlx5_flow_meter_info *next_fm = NULL;
16373         struct mlx5_flow_meter_policy *next_policy;
16374         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16375         struct mlx5_flow_tbl_data_entry *tbl_data;
16376         struct rte_flow_error error;
16377         uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16378         uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16379         bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
16380         bool match_src_port = false;
16381         int i;
16382
16383         /* If RSS or Queue, no previous actions / rules are created. */
16384         for (i = 0; i < RTE_COLORS; i++) {
16385                 acts[i].actions_n = 0;
16386                 if (i == RTE_COLOR_RED) {
16387                         /* Only support drop on red. */
16388                         acts[i].dv_actions[0] =
16389                                 mtr_policy->dr_drop_action[domain];
16390                         acts[i].actions_n = 1;
16391                         continue;
16392                 }
16393                 if (i == RTE_COLOR_GREEN &&
16394                     mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
16395                         struct rte_flow_attr attr = {
16396                                 .transfer = transfer
16397                         };
16398
16399                         next_fm = mlx5_flow_meter_find(priv,
16400                                         mtr_policy->act_cnt[i].next_mtr_id,
16401                                         NULL);
16402                         if (!next_fm) {
16403                                 DRV_LOG(ERR,
16404                                         "Failed to get next hierarchy meter.");
16405                                 goto err_exit;
16406                         }
16407                         if (mlx5_flow_meter_attach(priv, next_fm,
16408                                                    &attr, &error)) {
16409                                 DRV_LOG(ERR, "%s", error.message);
16410                                 next_fm = NULL;
16411                                 goto err_exit;
16412                         }
16413                         /* Meter action must be the first for TX. */
16414                         if (mtr_first) {
16415                                 acts[i].dv_actions[acts[i].actions_n] =
16416                                         next_fm->meter_action;
16417                                 acts[i].actions_n++;
16418                         }
16419                 }
16420                 if (mtr_policy->act_cnt[i].rix_mark) {
16421                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
16422                                         mtr_policy->act_cnt[i].rix_mark);
16423                         if (!tag) {
16424                                 DRV_LOG(ERR, "Failed to find "
16425                                 "mark action for policy.");
16426                                 goto err_exit;
16427                         }
16428                         acts[i].dv_actions[acts[i].actions_n] = tag->action;
16429                         acts[i].actions_n++;
16430                 }
16431                 if (mtr_policy->act_cnt[i].modify_hdr) {
16432                         acts[i].dv_actions[acts[i].actions_n] =
16433                                 mtr_policy->act_cnt[i].modify_hdr->action;
16434                         acts[i].actions_n++;
16435                 }
16436                 if (mtr_policy->act_cnt[i].fate_action) {
16437                         switch (mtr_policy->act_cnt[i].fate_action) {
16438                         case MLX5_FLOW_FATE_PORT_ID:
16439                                 port_action = mlx5_ipool_get
16440                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
16441                                 mtr_policy->act_cnt[i].rix_port_id_action);
16442                                 if (!port_action) {
16443                                         DRV_LOG(ERR, "Failed to find "
16444                                                 "port action for policy.");
16445                                         goto err_exit;
16446                                 }
16447                                 acts[i].dv_actions[acts[i].actions_n] =
16448                                         port_action->action;
16449                                 acts[i].actions_n++;
16450                                 mtr_policy->dev = dev;
16451                                 match_src_port = true;
16452                                 break;
16453                         case MLX5_FLOW_FATE_DROP:
16454                         case MLX5_FLOW_FATE_JUMP:
16455                                 acts[i].dv_actions[acts[i].actions_n] =
16456                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
16457                                 acts[i].actions_n++;
16458                                 break;
16459                         case MLX5_FLOW_FATE_SHARED_RSS:
16460                         case MLX5_FLOW_FATE_QUEUE:
16461                                 hrxq = mlx5_ipool_get
16462                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
16463                                          sub_policy->rix_hrxq[i]);
16464                                 if (!hrxq) {
16465                                         DRV_LOG(ERR, "Failed to find "
16466                                                 "queue action for policy.");
16467                                         goto err_exit;
16468                                 }
16469                                 acts[i].dv_actions[acts[i].actions_n] =
16470                                         hrxq->action;
16471                                 acts[i].actions_n++;
16472                                 break;
16473                         case MLX5_FLOW_FATE_MTR:
16474                                 if (!next_fm) {
16475                                         DRV_LOG(ERR,
16476                                                 "No next hierarchy meter.");
16477                                         goto err_exit;
16478                                 }
16479                                 if (!mtr_first) {
16480                                         acts[i].dv_actions[acts[i].actions_n] =
16481                                                         next_fm->meter_action;
16482                                         acts[i].actions_n++;
16483                                 }
16484                                 if (mtr_policy->act_cnt[i].next_sub_policy) {
16485                                         next_sub_policy =
16486                                         mtr_policy->act_cnt[i].next_sub_policy;
16487                                 } else {
16488                                         next_policy =
16489                                                 mlx5_flow_meter_policy_find(dev,
16490                                                 next_fm->policy_id, NULL);
16491                                         MLX5_ASSERT(next_policy);
16492                                         next_sub_policy =
16493                                         next_policy->sub_policys[domain][0];
16494                                 }
16495                                 tbl_data =
16496                                         container_of(next_sub_policy->tbl_rsc,
16497                                         struct mlx5_flow_tbl_data_entry, tbl);
16498                                 acts[i].dv_actions[acts[i].actions_n++] =
16499                                                         tbl_data->jump.action;
16500                                 if (mtr_policy->act_cnt[i].modify_hdr)
16501                                         match_src_port = !!transfer;
16502                                 break;
16503                         default:
16504                         /* Queue action: do nothing. */
16505                                 break;
16506                         }
16507                 }
16508         }
16509         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
16510                                 egress, transfer, match_src_port, acts)) {
16511                 DRV_LOG(ERR,
16512                         "Failed to create policy rules per domain.");
16513                 goto err_exit;
16514         }
16515         return 0;
16516 err_exit:
16517         if (next_fm)
16518                 mlx5_flow_meter_detach(priv, next_fm);
16519         return -1;
16520 }
16521
16522 /**
16523  * Create the policy rules.
16524  *
16525  * @param[in] dev
16526  *   Pointer to Ethernet device.
16527  * @param[in,out] mtr_policy
16528  *   Pointer to meter policy table.
16529  *
16530  * @return
16531  *   0 on success, -1 otherwise.
16532  */
16533 static int
16534 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
16535                              struct mlx5_flow_meter_policy *mtr_policy)
16536 {
16537         int i;
16538         uint16_t sub_policy_num;
16539
16540         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16541                 sub_policy_num = (mtr_policy->sub_policy_num >>
16542                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16543                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16544                 if (!sub_policy_num)
16545                         continue;
16546                 /* Prepare actions list and create policy rules. */
16547                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16548                         mtr_policy->sub_policys[i][0], i)) {
16549                         DRV_LOG(ERR, "Failed to create policy action "
16550                                 "list per domain.");
16551                         return -1;
16552                 }
16553         }
16554         return 0;
16555 }
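
/*
 * Hedged sketch (illustrative, not called by the driver): the driver
 * packs one small per-domain counter into the single sub_policy_num
 * word, MLX5_MTR_SUB_POLICY_NUM_SHIFT bits per domain, as manipulated
 * inline in several functions here. Helper names are illustrative only.
 */
static __rte_unused uint16_t
mtr_sub_policy_num_get_sketch(uint32_t sub_policy_num, uint32_t domain)
{
	return (sub_policy_num >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
	       MLX5_MTR_SUB_POLICY_NUM_MASK;
}

static __rte_unused uint32_t
mtr_sub_policy_num_set_sketch(uint32_t sub_policy_num, uint32_t domain,
			      uint16_t num)
{
	sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
			    (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
	return sub_policy_num |
	       ((uint32_t)(num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
		(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
}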
16556
16557 static int
16558 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
16559 {
16560         struct mlx5_priv *priv = dev->data->dev_private;
16561         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16562         struct mlx5_flow_meter_def_policy *def_policy;
16563         struct mlx5_flow_tbl_resource *jump_tbl;
16564         struct mlx5_flow_tbl_data_entry *tbl_data;
16565         uint8_t egress, transfer;
16566         struct rte_flow_error error;
16567         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16568         int ret;
16569
16570         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16571         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16572         def_policy = mtrmng->def_policy[domain];
16573         if (!def_policy) {
16574                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
16575                         sizeof(struct mlx5_flow_meter_def_policy),
16576                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
16577                 if (!def_policy) {
16578                         DRV_LOG(ERR, "Failed to alloc default policy table.");
16579                         goto def_policy_error;
16580                 }
16581                 mtrmng->def_policy[domain] = def_policy;
16582                 /* Create the meter suffix table with SUFFIX level. */
16583                 jump_tbl = flow_dv_tbl_resource_get(dev,
16584                                 MLX5_FLOW_TABLE_LEVEL_METER,
16585                                 egress, transfer, false, NULL, 0,
16586                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16587                 if (!jump_tbl) {
16588                         DRV_LOG(ERR,
16589                                 "Failed to create meter suffix table.");
16590                         goto def_policy_error;
16591                 }
16592                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
16593                 tbl_data = container_of(jump_tbl,
16594                                         struct mlx5_flow_tbl_data_entry, tbl);
16595                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
16596                                                 tbl_data->jump.action;
16597                 acts[RTE_COLOR_GREEN].dv_actions[0] = tbl_data->jump.action;
16598                 acts[RTE_COLOR_GREEN].actions_n = 1;
16599                 /*
16600                  * YELLOW has the same default policy as GREEN.
16601                  * Green and yellow share the same table and action; the
16602                  * second table resource get only bumps the reference
16603                  * count used at the release stage.
16604                  */
16605                 jump_tbl = flow_dv_tbl_resource_get(dev,
16606                                 MLX5_FLOW_TABLE_LEVEL_METER,
16607                                 egress, transfer, false, NULL, 0,
16608                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16609                 if (!jump_tbl) {
16610                         DRV_LOG(ERR,
16611                                 "Failed to get meter suffix table.");
16612                         goto def_policy_error;
16613                 }
16614                 def_policy->sub_policy.jump_tbl[RTE_COLOR_YELLOW] = jump_tbl;
16615                 tbl_data = container_of(jump_tbl,
16616                                         struct mlx5_flow_tbl_data_entry, tbl);
16617                 def_policy->dr_jump_action[RTE_COLOR_YELLOW] =
16618                                                 tbl_data->jump.action;
16619                 acts[RTE_COLOR_YELLOW].dv_actions[0] = tbl_data->jump.action;
16620                 acts[RTE_COLOR_YELLOW].actions_n = 1;
16621                 /* Create jump action to the drop table. */
16622                 if (!mtrmng->drop_tbl[domain]) {
16623                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
16624                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
16625                                  egress, transfer, false, NULL, 0,
16626                                  0, MLX5_MTR_TABLE_ID_DROP, &error);
16627                         if (!mtrmng->drop_tbl[domain]) {
16628                                 DRV_LOG(ERR, "Failed to create meter "
16629                                         "drop table for default policy.");
16630                                 goto def_policy_error;
16631                         }
16632                 }
16633                 /* All RED: unique drop table for the jump action. */
16634                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16635                                         struct mlx5_flow_tbl_data_entry, tbl);
16636                 def_policy->dr_jump_action[RTE_COLOR_RED] =
16637                                                 tbl_data->jump.action;
16638                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
16639                 acts[RTE_COLOR_RED].actions_n = 1;
16640                 /* Create default policy rules. */
16641                 ret = __flow_dv_create_domain_policy_rules(dev,
16642                                         &def_policy->sub_policy,
16643                                         egress, transfer, false, acts);
16644                 if (ret) {
16645                         DRV_LOG(ERR, "Failed to create default policy rules.");
16646                         goto def_policy_error;
16647                 }
16648         }
16649         return 0;
16650 def_policy_error:
16651         __flow_dv_destroy_domain_def_policy(dev,
16652                                             (enum mlx5_meter_domain)domain);
16653         return -1;
16654 }
16655
16656 /**
16657  * Create the default policy table set.
16658  *
16659  * @param[in] dev
16660  *   Pointer to Ethernet device.
16661  * @return
16662  *   0 on success, -1 otherwise.
16663  */
16664 static int
16665 flow_dv_create_def_policy(struct rte_eth_dev *dev)
16666 {
16667         struct mlx5_priv *priv = dev->data->dev_private;
16668         int i;
16669
16670         /* Create the non-termination default policy table per domain. */
16671         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16672                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
16673                         continue;
16674                 if (__flow_dv_create_domain_def_policy(dev, i)) {
16675                         DRV_LOG(ERR, "Failed to create default policy");
16676                         /* Rollback the created default policies for others. */
16677                         flow_dv_destroy_def_policy(dev);
16678                         return -1;
16679                 }
16680         }
16681         return 0;
16682 }
16683
16684 /**
16685  * Create the needed meter tables.
16686  * Lock free; the caller must hold the mutex.
16687  *
16688  * @param[in] dev
16689  *   Pointer to Ethernet device.
16690  * @param[in] fm
16691  *   Meter information table.
16692  * @param[in] mtr_idx
16693  *   Meter index.
16694  * @param[in] domain_bitmap
16695  *   Domain bitmap.
16696  * @return
16697  *   0 on success, -1 otherwise.
16698  */
16699 static int
16700 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
16701                         struct mlx5_flow_meter_info *fm,
16702                         uint32_t mtr_idx,
16703                         uint8_t domain_bitmap)
16704 {
16705         struct mlx5_priv *priv = dev->data->dev_private;
16706         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16707         struct rte_flow_error error;
16708         struct mlx5_flow_tbl_data_entry *tbl_data;
16709         uint8_t egress, transfer;
16710         void *actions[METER_ACTIONS];
16711         int domain, ret, i;
16712         struct mlx5_flow_counter *cnt;
16713         struct mlx5_flow_dv_match_params value = {
16714                 .size = sizeof(value.buf),
16715         };
16716         struct mlx5_flow_dv_match_params matcher_para = {
16717                 .size = sizeof(matcher_para.buf),
16718         };
16719         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
16720                                                      0, &error);
16721         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
16722         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
16723         struct mlx5_list_entry *entry;
16724         struct mlx5_flow_dv_matcher matcher = {
16725                 .mask = {
16726                         .size = sizeof(matcher.mask.buf),
16727                 },
16728         };
16729         struct mlx5_flow_dv_matcher *drop_matcher;
16730         struct mlx5_flow_cb_ctx ctx = {
16731                 .error = &error,
16732                 .data = &matcher,
16733         };
16734         uint8_t misc_mask;
16735
16736         if (!priv->mtr_en || mtr_id_reg_c < 0) {
16737                 rte_errno = ENOTSUP;
16738                 return -1;
16739         }
16740         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
16741                 if (!(domain_bitmap & (1 << domain)) ||
16742                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
16743                         continue;
16744                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16745                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16746                 /* Create the drop table with METER DROP level. */
16747                 if (!mtrmng->drop_tbl[domain]) {
16748                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
16749                                         MLX5_FLOW_TABLE_LEVEL_METER,
16750                                         egress, transfer, false, NULL, 0,
16751                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
16752                         if (!mtrmng->drop_tbl[domain]) {
16753                                 DRV_LOG(ERR, "Failed to create meter drop table.");
16754                                 goto policy_error;
16755                         }
16756                 }
16757                 /* Create default matcher in drop table. */
16758                 matcher.tbl = mtrmng->drop_tbl[domain];
16759                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16760                                 struct mlx5_flow_tbl_data_entry, tbl);
16761                 if (!mtrmng->def_matcher[domain]) {
16762                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16763                                        (enum modify_reg)mtr_id_reg_c,
16764                                        0, 0);
16765                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
16766                         matcher.crc = rte_raw_cksum
16767                                         ((const void *)matcher.mask.buf,
16768                                         matcher.mask.size);
16769                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16770                         if (!entry) {
16771                                 DRV_LOG(ERR, "Failed to register meter "
16772                                 "drop default matcher.");
16773                                 goto policy_error;
16774                         }
16775                         mtrmng->def_matcher[domain] = container_of(entry,
16776                                         struct mlx5_flow_dv_matcher, entry);
16777                 }
16778                 /* Create default rule in drop table. */
16779                 if (!mtrmng->def_rule[domain]) {
16780                         i = 0;
16781                         actions[i++] = priv->sh->dr_drop_action;
16782                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16783                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
16784                         misc_mask = flow_dv_matcher_enable(value.buf);
16785                         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16786                         ret = mlx5_flow_os_create_flow
16787                                 (mtrmng->def_matcher[domain]->matcher_object,
16788                                 (void *)&value, i, actions,
16789                                 &mtrmng->def_rule[domain]);
16790                         if (ret) {
16791                                 DRV_LOG(ERR, "Failed to create meter "
16792                                 "default drop rule for drop table.");
16793                                 goto policy_error;
16794                         }
16795                 }
16796                 if (!fm->drop_cnt)
16797                         continue;
16798                 MLX5_ASSERT(mtrmng->max_mtr_bits);
16799                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
16800                         /* Create matchers for Drop. */
16801                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16802                                         (enum modify_reg)mtr_id_reg_c, 0,
16803                                         (mtr_id_mask << mtr_id_offset));
16804                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
16805                         matcher.crc = rte_raw_cksum
16806                                         ((const void *)matcher.mask.buf,
16807                                         matcher.mask.size);
16808                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16809                         if (!entry) {
16810                                 DRV_LOG(ERR,
16811                                 "Failed to register meter drop matcher.");
16812                                 goto policy_error;
16813                         }
16814                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
16815                                 container_of(entry, struct mlx5_flow_dv_matcher,
16816                                              entry);
16817                 }
16818                 drop_matcher =
16819                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
16820                 /* Create drop rule, matching meter_id only. */
16821                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16822                                 (enum modify_reg)mtr_id_reg_c,
16823                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
16824                 i = 0;
16825                 cnt = flow_dv_counter_get_by_idx(dev,
16826                                         fm->drop_cnt, NULL);
16827                 actions[i++] = cnt->action;
16828                 actions[i++] = priv->sh->dr_drop_action;
16829                 misc_mask = flow_dv_matcher_enable(value.buf);
16830                 __flow_dv_adjust_buf_size(&value.size, misc_mask);
16831                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
16832                                                (void *)&value, i, actions,
16833                                                &fm->drop_rule[domain]);
16834                 if (ret) {
16835                         DRV_LOG(ERR, "Failed to create meter "
16836                                 "drop rule for drop table.");
16837                         goto policy_error;
16838                 }
16839         }
16840         return 0;
16841 policy_error:
16842         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16843                 if (fm->drop_rule[i]) {
16844                         claim_zero(mlx5_flow_os_destroy_flow
16845                                 (fm->drop_rule[i]));
16846                         fm->drop_rule[i] = NULL;
16847                 }
16848         }
16849         return -1;
16850 }
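
/*
 * Hedged sketch (illustrative, not called by the driver): when the
 * meter register is shared with the color bits (priv->mtr_reg_share),
 * the meter ID is matched above the low MLX5_MTR_COLOR_BITS, as in the
 * drop rule built by flow_dv_create_mtr_tbls() above. The helper name
 * is illustrative only.
 */
static __rte_unused uint32_t
mtr_id_reg_value_sketch(uint32_t mtr_idx, bool reg_share)
{
	uint8_t mtr_id_offset = reg_share ? MLX5_MTR_COLOR_BITS : 0;

	return mtr_idx << mtr_id_offset;
}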
16851
16852 static struct mlx5_flow_meter_sub_policy *
16853 __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
16854                 struct mlx5_flow_meter_policy *mtr_policy,
16855                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
16856                 struct mlx5_flow_meter_sub_policy *next_sub_policy,
16857                 bool *is_reuse)
16858 {
16859         struct mlx5_priv *priv = dev->data->dev_private;
16860         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16861         uint32_t sub_policy_idx = 0;
16862         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
16863         uint32_t i, j;
16864         struct mlx5_hrxq *hrxq;
16865         struct mlx5_flow_handle dh;
16866         struct mlx5_meter_policy_action_container *act_cnt;
16867         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16868         uint16_t sub_policy_num;
16869
16870         rte_spinlock_lock(&mtr_policy->sl);
16871         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16872                 if (!rss_desc[i])
16873                         continue;
16874                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
16875                 if (!hrxq_idx[i]) {
16876                         rte_spinlock_unlock(&mtr_policy->sl);
16877                         return NULL;
16878                 }
16879         }
16880         sub_policy_num = (mtr_policy->sub_policy_num >>
16881                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16882                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16883         for (j = 0; j < sub_policy_num; j++) {
16884                 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16885                         if (rss_desc[i] &&
16886                             hrxq_idx[i] !=
16887                             mtr_policy->sub_policys[domain][j]->rix_hrxq[i])
16888                                 break;
16889                 }
16890                 if (i >= MLX5_MTR_RTE_COLORS) {
16891                         /*
16892                          * Found the sub policy table with
16893                          * the same queue per color.
16894                          */
16895                         rte_spinlock_unlock(&mtr_policy->sl);
16896                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16897                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16898                         *is_reuse = true;
16899                         return mtr_policy->sub_policys[domain][j];
16900                 }
16901         }
16902         /* Create sub policy. */
16903         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
16904                 /* Reuse the first pre-allocated sub_policy. */
16905                 sub_policy = mtr_policy->sub_policys[domain][0];
16906                 sub_policy_idx = sub_policy->idx;
16907         } else {
16908                 sub_policy = mlx5_ipool_zmalloc
16909                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16910                                  &sub_policy_idx);
16911                 if (!sub_policy ||
16912                     sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
16913                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16914                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16915                         goto rss_sub_policy_error;
16916                 }
16917                 sub_policy->idx = sub_policy_idx;
16918                 sub_policy->main_policy = mtr_policy;
16919         }
16920         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16921                 if (!rss_desc[i])
16922                         continue;
16923                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
16924                 if (mtr_policy->is_hierarchy) {
16925                         act_cnt = &mtr_policy->act_cnt[i];
16926                         act_cnt->next_sub_policy = next_sub_policy;
16927                         mlx5_hrxq_release(dev, hrxq_idx[i]);
16928                 } else {
16929                         /*
16930                          * Overwrite the last action from
16931                          * RSS action to Queue action.
16932                          */
16933                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
16934                                               hrxq_idx[i]);
16935                         if (!hrxq) {
16936                                 DRV_LOG(ERR, "Failed to get policy hrxq");
16937                                 goto rss_sub_policy_error;
16938                         }
16939                         act_cnt = &mtr_policy->act_cnt[i];
16940                         if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16941                                 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16942                                 if (act_cnt->rix_mark)
16943                                         dh.mark = 1;
16944                                 dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16945                                 dh.rix_hrxq = hrxq_idx[i];
16946                                 flow_drv_rxq_flags_set(dev, &dh);
16947                         }
16948                 }
16949         }
16950         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16951                                                sub_policy, domain)) {
16952                 DRV_LOG(ERR, "Failed to create policy "
16953                         "rules for ingress domain.");
16954                 goto rss_sub_policy_error;
16955         }
16956         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16957                 i = (mtr_policy->sub_policy_num >>
16958                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16959                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16960                 if (i >= MLX5_MTR_RSS_MAX_SUB_POLICY) {
16961                         DRV_LOG(ERR, "No free sub-policy slot.");
16962                         goto rss_sub_policy_error;
16963                 }
16964                 mtr_policy->sub_policys[domain][i] = sub_policy;
16965                 i++;
16966                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16967                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16968                 mtr_policy->sub_policy_num |=
16969                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16970                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16971         }
16972         rte_spinlock_unlock(&mtr_policy->sl);
16973         *is_reuse = false;
16974         return sub_policy;
16975 rss_sub_policy_error:
16976         if (sub_policy) {
16977                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16978                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16979                         i = (mtr_policy->sub_policy_num >>
16980                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16981                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16982                         mtr_policy->sub_policys[domain][i] = NULL;
16983                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16984                                         sub_policy->idx);
16985                 }
16986         }
16987         rte_spinlock_unlock(&mtr_policy->sl);
16988         return NULL;
16989 }
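
/*
 * Hedged sketch of the reuse test in __flow_dv_meter_get_rss_sub_policy()
 * above: a sub-policy is reusable when every requested color already
 * maps to the same hrxq index. A zero entry in 'want' stands for a color
 * without an RSS request. Names are illustrative only.
 */
static __rte_unused bool
mtr_sub_policy_match_sketch(const uint32_t want[MLX5_MTR_RTE_COLORS],
			    const uint32_t have[MLX5_MTR_RTE_COLORS])
{
	uint32_t i;

	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
		if (want[i] && want[i] != have[i])
			return false;
	return true;
}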
16990
16991 /**
16992  * Find the policy table for the prefix table with RSS.
16993  *
16994  * @param[in] dev
16995  *   Pointer to Ethernet device.
16996  * @param[in] mtr_policy
16997  *   Pointer to meter policy table.
16998  * @param[in] rss_desc
16999  *   Pointer to RSS descriptors per color.
17000  * @return
17001  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
17002  */
17003 static struct mlx5_flow_meter_sub_policy *
17004 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
17005                 struct mlx5_flow_meter_policy *mtr_policy,
17006                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
17007 {
17008         struct mlx5_priv *priv = dev->data->dev_private;
17009         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17010         struct mlx5_flow_meter_info *next_fm;
17011         struct mlx5_flow_meter_policy *next_policy;
17012         struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
17013         struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
17014         struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
17015         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17016         bool reuse_sub_policy;
17017         uint32_t i = 0;
17018         uint32_t j = 0;
17019
17020         while (true) {
17021                 /* Iterate the hierarchy to collect all its policies. */
17022                 policies[i++] = mtr_policy;
17023                 if (!mtr_policy->is_hierarchy)
17024                         break;
17025                 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
17026                         DRV_LOG(ERR, "Exceeded max meter number in hierarchy.");
17027                         return NULL;
17028                 }
17029                 next_fm = mlx5_flow_meter_find(priv,
17030                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
17031                 if (!next_fm) {
17032                         DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
17033                         return NULL;
17034                 }
17035                 next_policy =
17036                         mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
17037                                                     NULL);
17038                 MLX5_ASSERT(next_policy);
17039                 mtr_policy = next_policy;
17040         }
17041         while (i) {
17042                 /*
17043                  * From last policy to the first one in hierarchy,
17044                  * create / get the sub policy for each of them.
17045                  */
17046                 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
17047                                                         policies[--i],
17048                                                         rss_desc,
17049                                                         next_sub_policy,
17050                                                         &reuse_sub_policy);
17051                 if (!sub_policy) {
17052                         DRV_LOG(ERR, "Failed to get the sub policy.");
17053                         goto err_exit;
17054                 }
17055                 if (!reuse_sub_policy)
17056                         sub_policies[j++] = sub_policy;
17057                 next_sub_policy = sub_policy;
17058         }
17059         return sub_policy;
17060 err_exit:
17061         while (j) {
17062                 uint16_t sub_policy_num;
17063
17064                 sub_policy = sub_policies[--j];
17065                 mtr_policy = sub_policy->main_policy;
17066                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
17067                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
17068                         sub_policy_num = (mtr_policy->sub_policy_num >>
17069                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17070                                 MLX5_MTR_SUB_POLICY_NUM_MASK;
17071                         mtr_policy->sub_policys[domain][sub_policy_num - 1] =
17072                                                                         NULL;
17073                         sub_policy_num--;
17074                         mtr_policy->sub_policy_num &=
17075                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17076                                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17077                         mtr_policy->sub_policy_num |=
17078                         (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17079                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17080                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17081                                         sub_policy->idx);
17082                 }
17083         }
17084         return NULL;
17085 }
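
/*
 * Hedged sketch of the two-phase pattern in
 * flow_dv_meter_sub_policy_rss_prepare() above: collect the policy
 * chain forward, then build from the tail so each node can reference
 * its successor's result. Names are illustrative only.
 */
static __rte_unused void *
mtr_chain_build_backward_sketch(void *nodes[], uint32_t n,
				void *(*build)(void *node, void *next))
{
	void *next = NULL;

	while (n)
		next = build(nodes[--n], next);
	return next;
}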
17086
17087 /**
17088  * Create the sub policy tag rule for all meters in hierarchy.
17089  *
17090  * @param[in] dev
17091  *   Pointer to Ethernet device.
17092  * @param[in] fm
17093  *   Meter information table.
17094  * @param[in] src_port
17095  *   The src port this extra rule should use.
17096  * @param[in] item
17097  *   The src port match item.
17098  * @param[out] error
17099  *   Perform verbose error reporting if not NULL.
17100  * @return
17101  *   0 on success, a negative errno value otherwise and rte_errno is set.
17102  */
17103 static int
17104 flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
17105                                 struct mlx5_flow_meter_info *fm,
17106                                 int32_t src_port,
17107                                 const struct rte_flow_item *item,
17108                                 struct rte_flow_error *error)
17109 {
17110         struct mlx5_priv *priv = dev->data->dev_private;
17111         struct mlx5_flow_meter_policy *mtr_policy;
17112         struct mlx5_flow_meter_sub_policy *sub_policy;
17113         struct mlx5_flow_meter_info *next_fm = NULL;
17114         struct mlx5_flow_meter_policy *next_policy;
17115         struct mlx5_flow_meter_sub_policy *next_sub_policy;
17116         struct mlx5_flow_tbl_data_entry *tbl_data;
17117         struct mlx5_sub_policy_color_rule *color_rule;
17118         struct mlx5_meter_policy_acts acts;
17119         uint32_t color_reg_c_idx;
17120         bool mtr_first = src_port != UINT16_MAX;
17121         struct rte_flow_attr attr = {
17122                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
17123                 .priority = 0,
17124                 .ingress = 0,
17125                 .egress = 0,
17126                 .transfer = 1,
17127                 .reserved = 0,
17128         };
17129         uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
17130         int i;
17131
17132         mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17133         MLX5_ASSERT(mtr_policy);
17134         if (!mtr_policy->is_hierarchy)
17135                 return 0;
17136         next_fm = mlx5_flow_meter_find(priv,
17137                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
17138         if (!next_fm) {
17139                 return rte_flow_error_set(error, EINVAL,
17140                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
17141                                 "Failed to find next meter in hierarchy.");
17142         }
17143         if (!next_fm->drop_cnt)
17144                 goto exit;
17145         color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
17146         sub_policy = mtr_policy->sub_policys[domain][0];
17147         for (i = 0; i < RTE_COLORS; i++) {
17148                 bool rule_exist = false;
17149                 struct mlx5_meter_policy_action_container *act_cnt;
17150
17151                 if (i >= RTE_COLOR_YELLOW)
17152                         break;
17153                 TAILQ_FOREACH(color_rule,
17154                               &sub_policy->color_rules[i], next_port)
17155                         if (color_rule->src_port == src_port) {
17156                                 rule_exist = true;
17157                                 break;
17158                         }
17159                 if (rule_exist)
17160                         continue;
17161                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
17162                                 sizeof(struct mlx5_sub_policy_color_rule),
17163                                 0, SOCKET_ID_ANY);
17164                 if (!color_rule)
17165                         return rte_flow_error_set(error, ENOMEM,
17166                                 RTE_FLOW_ERROR_TYPE_ACTION,
17167                                 NULL, "No memory to create tag color rule.");
17168                 color_rule->src_port = src_port;
17169                 attr.priority = i;
17170                 next_policy = mlx5_flow_meter_policy_find(dev,
17171                                                 next_fm->policy_id, NULL);
17172                 MLX5_ASSERT(next_policy);
17173                 next_sub_policy = next_policy->sub_policys[domain][0];
17174                 tbl_data = container_of(next_sub_policy->tbl_rsc,
17175                                         struct mlx5_flow_tbl_data_entry, tbl);
17176                 act_cnt = &mtr_policy->act_cnt[i];
17177                 if (mtr_first) {
17178                         acts.dv_actions[0] = next_fm->meter_action;
17179                         acts.dv_actions[1] = act_cnt->modify_hdr->action;
17180                 } else {
17181                         acts.dv_actions[0] = act_cnt->modify_hdr->action;
17182                         acts.dv_actions[1] = next_fm->meter_action;
17183                 }
17184                 acts.dv_actions[2] = tbl_data->jump.action;
17185                 acts.actions_n = 3;
17186                 if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
17187                         next_fm = NULL;
17188                         goto err_exit;
17189                 }
17190                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
17191                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
17192                                 &attr, true, item,
17193                                 &color_rule->matcher, error)) {
17194                         rte_flow_error_set(error, errno,
17195                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17196                                 "Failed to create hierarchy meter matcher.");
17197                         goto err_exit;
17198                 }
17199                 if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
17200                                         (enum rte_color)i,
17201                                         color_rule->matcher->matcher_object,
17202                                         acts.actions_n, acts.dv_actions,
17203                                         true, item,
17204                                         &color_rule->rule, &attr)) {
17205                         rte_flow_error_set(error, errno,
17206                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17207                                 "Failed to create hierarchy meter rule.");
17208                         goto err_exit;
17209                 }
17210                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
17211                                   color_rule, next_port);
17212         }
17213 exit:
17214         /*
17215          * Recursive call to iterate all meters in hierarchy and
17216          * create needed rules.
17217          */
17218         return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
17219                                                 src_port, item, error);
17220 err_exit:
17221         if (color_rule) {
17222                 if (color_rule->rule)
17223                         mlx5_flow_os_destroy_flow(color_rule->rule);
17224                 if (color_rule->matcher) {
17225                         struct mlx5_flow_tbl_data_entry *tbl =
17226                                 container_of(color_rule->matcher->tbl,
17227                                                 typeof(*tbl), tbl);
17228                         mlx5_list_unregister(tbl->matchers,
17229                                                 &color_rule->matcher->entry);
17230                 }
17231                 mlx5_free(color_rule);
17232         }
17233         if (next_fm)
17234                 mlx5_flow_meter_detach(priv, next_fm);
17235         return -rte_errno;
17236 }
17237
17238 /**
17239  * Destroy the sub policy table with RX queue.
17240  *
17241  * @param[in] dev
17242  *   Pointer to Ethernet device.
17243  * @param[in] mtr_policy
17244  *   Pointer to meter policy table.
17245  */
17246 static void
17247 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
17248                                     struct mlx5_flow_meter_policy *mtr_policy)
17249 {
17250         struct mlx5_priv *priv = dev->data->dev_private;
17251         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17252         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17253         uint32_t i, j;
17254         uint16_t sub_policy_num, new_policy_num;
17255
17256         rte_spinlock_lock(&mtr_policy->sl);
17257         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17258                 switch (mtr_policy->act_cnt[i].fate_action) {
17259                 case MLX5_FLOW_FATE_SHARED_RSS:
17260                         sub_policy_num = (mtr_policy->sub_policy_num >>
17261                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17262                         MLX5_MTR_SUB_POLICY_NUM_MASK;
17263                         new_policy_num = sub_policy_num;
17264                         for (j = 0; j < sub_policy_num; j++) {
17265                                 sub_policy =
17266                                         mtr_policy->sub_policys[domain][j];
17267                                 if (sub_policy) {
17268                                         __flow_dv_destroy_sub_policy_rules(dev,
17269                                                                 sub_policy);
17270                                         if (sub_policy !=
17271                                             mtr_policy->sub_policys[domain][0]) {
17272                                                 mtr_policy->sub_policys[domain][j] =
17273                                                                         NULL;
17274                                                 mlx5_ipool_free(priv->sh->ipool
17275                                                         [MLX5_IPOOL_MTR_POLICY],
17276                                                         sub_policy->idx);
17277                                                 new_policy_num--;
17278                                         }
17279                                 }
17280                         }
17281                         if (new_policy_num != sub_policy_num) {
17282                                 mtr_policy->sub_policy_num &=
17283                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17284                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17285                                 mtr_policy->sub_policy_num |=
17286                                 (new_policy_num &
17287                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17288                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17289                         }
17290                         break;
17291                 case MLX5_FLOW_FATE_QUEUE:
17292                         sub_policy = mtr_policy->sub_policys[domain][0];
17293                         __flow_dv_destroy_sub_policy_rules(dev,
17294                                                            sub_policy);
17295                         break;
17296                 default:
17297                         /* Other actions without a queue, nothing to do. */
17298                         break;
17299                 }
17300         }
17301         rte_spinlock_unlock(&mtr_policy->sl);
17302 }
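/*
 * Illustrative sketch (not part of the driver) of the sub_policy_num
 * packing handled above: the per-domain sub-policy counts share one
 * 32-bit word, each domain owning MLX5_MTR_SUB_POLICY_NUM_SHIFT bits.
 * The helper names below are hypothetical; only the shift/mask macros
 * come from the driver.
 *
 * @code
 * static uint16_t
 * sub_policy_num_get(uint32_t packed, uint32_t domain)
 * {
 *	return (packed >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
 *	       MLX5_MTR_SUB_POLICY_NUM_MASK;
 * }
 *
 * static uint32_t
 * sub_policy_num_set(uint32_t packed, uint32_t domain, uint16_t num)
 * {
 *	packed &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
 *		    (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
 *	return packed | ((uint32_t)(num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
 *			 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
 * }
 * @endcode
 */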
17303 /**
17304  * Check whether the DR drop action is supported on the root table.
17305  *
17306  * Create a simple flow with the DR drop action on the root table to
17307  * check whether it is supported there.
17308  *
17309  * @param[in] dev
17310  *   Pointer to rte_eth_dev structure.
17311  *
17312  * @return
17313  *   0 on success, a negative errno value otherwise and rte_errno is set.
17314  */
17315 int
17316 mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)
17317 {
17318         struct mlx5_priv *priv = dev->data->dev_private;
17319         struct mlx5_dev_ctx_shared *sh = priv->sh;
17320         struct mlx5_flow_dv_match_params mask = {
17321                 .size = sizeof(mask.buf),
17322         };
17323         struct mlx5_flow_dv_match_params value = {
17324                 .size = sizeof(value.buf),
17325         };
17326         struct mlx5dv_flow_matcher_attr dv_attr = {
17327                 .type = IBV_FLOW_ATTR_NORMAL,
17328                 .priority = 0,
17329                 .match_criteria_enable = 0,
17330                 .match_mask = (void *)&mask,
17331         };
17332         struct mlx5_flow_tbl_resource *tbl = NULL;
17333         void *matcher = NULL;
17334         void *flow = NULL;
17335         int ret = -1;
17336
17337         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
17338                                         0, 0, 0, NULL);
17339         if (!tbl)
17340                 goto err;
17341         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17342         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17343         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17344                                                tbl->obj, &matcher);
17345         if (ret)
17346                 goto err;
17347         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17348         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17349                                        &sh->dr_drop_action, &flow);
17350 err:
17351         /*
17352          * If the DR drop action is not supported on the root table, flow
17353          * creation fails with EOPNOTSUPP or EPROTONOSUPPORT.
17354          */
17355         if (!flow) {
17356                 if (matcher &&
17357                     (errno == EPROTONOSUPPORT || errno == EOPNOTSUPP))
17358                         DRV_LOG(INFO, "DR drop action is not supported in root table.");
17359                 else
17360                         DRV_LOG(ERR, "Unexpected error in DR drop action support detection");
17361                 ret = -1;
17362         } else {
17363                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17364         }
17365         if (matcher)
17366                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17367         if (tbl)
17368                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17369         return ret;
17370 }
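/*
 * Usage sketch (an assumption for illustration, not the driver's actual
 * call site): the probe above is meant to run once at device start-up so
 * that later table-creation code knows whether the DR drop action may be
 * used on the root table.
 *
 * @code
 * if (mlx5_flow_discover_dr_action_support(dev) == 0)
 *	sh->root_drop_action_en = 1; // hypothetical capability flag
 * @endcode
 */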
17371
17372 /**
17373  * Validate batch counter support in the root table.
17374  *
17375  * Create a simple flow with an invalid counter and a drop action on the
17376  * root table to check whether batch counters with offset are supported.
17377  *
17378  * @param[in] dev
17379  *   Pointer to rte_eth_dev structure.
17380  *
17381  * @return
17382  *   0 on success, a negative errno value otherwise and rte_errno is set.
17383  */
17384 int
17385 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
17386 {
17387         struct mlx5_priv *priv = dev->data->dev_private;
17388         struct mlx5_dev_ctx_shared *sh = priv->sh;
17389         struct mlx5_flow_dv_match_params mask = {
17390                 .size = sizeof(mask.buf),
17391         };
17392         struct mlx5_flow_dv_match_params value = {
17393                 .size = sizeof(value.buf),
17394         };
17395         struct mlx5dv_flow_matcher_attr dv_attr = {
17396                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
17397                 .priority = 0,
17398                 .match_criteria_enable = 0,
17399                 .match_mask = (void *)&mask,
17400         };
17401         void *actions[2] = { 0 };
17402         struct mlx5_flow_tbl_resource *tbl = NULL;
17403         struct mlx5_devx_obj *dcs = NULL;
17404         void *matcher = NULL;
17405         void *flow = NULL;
17406         int ret = -1;
17407
17408         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
17409                                         0, 0, 0, NULL);
17410         if (!tbl)
17411                 goto err;
17412         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
17413         if (!dcs)
17414                 goto err;
17415         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
17416                                                     &actions[0]);
17417         if (ret)
17418                 goto err;
17419         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17420         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17421         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17422                                                tbl->obj, &matcher);
17423         if (ret)
17424                 goto err;
17425         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17426         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17427                                        actions, &flow);
17428 err:
17429         /*
17430          * If a batch counter with offset is not supported, the driver does
17431          * not validate the invalid offset value and flow creation succeeds.
17432          * In this case, batch counters are not supported in the root table.
17433          *
17434          * Otherwise, if flow creation fails, the counter offset is supported.
17435          */
17436         if (flow) {
17437                 DRV_LOG(INFO, "Batch counter is not supported in the root "
17438                               "table. Switching to fallback mode.");
17439                 rte_errno = ENOTSUP;
17440                 ret = -rte_errno;
17441                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17442         } else {
17443                 /* Check the matcher to ensure validation failed at flow creation. */
17444                 if (!matcher || errno != EINVAL)
17445                         DRV_LOG(ERR, "Unexpected error in counter offset "
17446                                      "support detection");
17447                 ret = 0;
17448         }
17449         if (actions[0])
17450                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
17451         if (matcher)
17452                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17453         if (tbl)
17454                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17455         if (dcs)
17456                 claim_zero(mlx5_devx_cmd_destroy(dcs));
17457         return ret;
17458 }
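/*
 * A hedged sketch of consuming the probe result above; the fallback flag
 * name is an assumption for illustration:
 *
 * @code
 * if (mlx5_flow_dv_discover_counter_offset_support(dev) < 0) {
 *	// Batch counters with offset are unusable on the root table:
 *	// fall back to one DevX counter object per flow counter.
 *	sh->counter_fallback = 1; // hypothetical flag
 * }
 * @endcode
 */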
17459
17460 /**
17461  * Query a devx counter.
17462  *
17463  * @param[in] dev
17464  *   Pointer to the Ethernet device structure.
17465  * @param[in] counter
17466  *   Index to the flow counter.
17467  * @param[in] clear
17468  *   Set to clear the counter statistics.
17469  * @param[out] pkts
17470  *   The statistics value of packets.
17471  * @param[out] bytes
17472  *   The statistics value of bytes.
17473  *
17474  * @return
17475  *   0 on success, otherwise return -1.
17476  */
17477 static int
17478 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
17479                       uint64_t *pkts, uint64_t *bytes)
17480 {
17481         struct mlx5_priv *priv = dev->data->dev_private;
17482         struct mlx5_flow_counter *cnt;
17483         uint64_t inn_pkts, inn_bytes;
17484         int ret;
17485
17486         if (!priv->sh->devx)
17487                 return -1;
17488
17489         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
17490         if (ret)
17491                 return -1;
17492         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
17493         *pkts = inn_pkts - cnt->hits;
17494         *bytes = inn_bytes - cnt->bytes;
17495         if (clear) {
17496                 cnt->hits = inn_pkts;
17497                 cnt->bytes = inn_bytes;
17498         }
17499         return 0;
17500 }
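/*
 * Worked example of the delta arithmetic above (callers reach this
 * function through the ops table as .counter_query): the DevX counter is
 * monotonic, so the driver keeps a software baseline in cnt->hits and
 * cnt->bytes. With raw readings of (1000 pkts, 64000 bytes) and a stored
 * baseline of (400, 25600), the query reports (600, 38400); passing
 * clear=true then advances the baseline to the raw values, so the next
 * query counts from zero again.
 */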
17501
17502 /**
17503  * Get aged-out flows.
17504  *
17505  * @param[in] dev
17506  *   Pointer to the Ethernet device structure.
17507  * @param[in] context
17508  *   The address of an array of pointers to the aged-out flow contexts.
17509  * @param[in] nb_contexts
17510  *   The length of the context array.
17511  * @param[out] error
17512  *   Perform verbose error reporting if not NULL. Initialized in case of
17513  *   error only.
17514  *
17515  * @return
17516  *   The number of contexts returned on success, otherwise a negative
17517  *   errno value. If nb_contexts is 0, return the total number of aged
17518  *   contexts. If nb_contexts is not 0, return the number of aged flows
17519  *   reported in the context array.
17521  */
17522 static int
17523 flow_dv_get_aged_flows(struct rte_eth_dev *dev,
17524                     void **context,
17525                     uint32_t nb_contexts,
17526                     struct rte_flow_error *error)
17527 {
17528         struct mlx5_priv *priv = dev->data->dev_private;
17529         struct mlx5_age_info *age_info;
17530         struct mlx5_age_param *age_param;
17531         struct mlx5_flow_counter *counter;
17532         struct mlx5_aso_age_action *act;
17533         int nb_flows = 0;
17534
17535         if (nb_contexts && !context)
17536                 return rte_flow_error_set(error, EINVAL,
17537                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17538                                           NULL, "empty context");
17539         age_info = GET_PORT_AGE_INFO(priv);
17540         rte_spinlock_lock(&age_info->aged_sl);
17541         LIST_FOREACH(act, &age_info->aged_aso, next) {
17542                 nb_flows++;
17543                 if (nb_contexts) {
17544                         context[nb_flows - 1] =
17545                                                 act->age_params.context;
17546                         if (!(--nb_contexts))
17547                                 break;
17548                 }
17549         }
17550         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
17551                 nb_flows++;
17552                 if (nb_contexts) {
17553                         age_param = MLX5_CNT_TO_AGE(counter);
17554                         context[nb_flows - 1] = age_param->context;
17555                         if (!(--nb_contexts))
17556                                 break;
17557                 }
17558         }
17559         rte_spinlock_unlock(&age_info->aged_sl);
17560         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
17561         return nb_flows;
17562 }
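/*
 * Usage sketch via the public API (rte_flow_get_aged_flows() dispatches
 * here through the ops table): query the count first, then fetch the
 * contexts that were registered with the AGE action.
 *
 * @code
 * struct rte_flow_error error;
 * int n = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
 *
 * if (n > 0) {
 *	void **ctx = calloc(n, sizeof(*ctx));
 *
 *	if (ctx != NULL) {
 *		n = rte_flow_get_aged_flows(port_id, ctx, n, &error);
 *		// Each ctx[i] is the context from rte_flow_action_age.
 *		free(ctx);
 *	}
 * }
 * @endcode
 */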
17563
17564 /*
17565  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
17566  */
17567 static uint32_t
17568 flow_dv_counter_allocate(struct rte_eth_dev *dev)
17569 {
17570         return flow_dv_counter_alloc(dev, 0);
17571 }
17572
17573 /**
17574  * Validate indirect action.
17575  * Dispatcher for action type specific validation.
17576  *
17577  * @param[in] dev
17578  *   Pointer to the Ethernet device structure.
17579  * @param[in] conf
17580  *   Indirect action configuration.
17581  * @param[in] action
17582  *   The indirect action object to validate.
17583  * @param[out] err
17584  *   Perform verbose error reporting if not NULL. Initialized in case of
17585  *   error only.
17586  *
17587  * @return
17588  *   0 on success, otherwise negative errno value.
17589  */
17590 static int
17591 flow_dv_action_validate(struct rte_eth_dev *dev,
17592                         const struct rte_flow_indir_action_conf *conf,
17593                         const struct rte_flow_action *action,
17594                         struct rte_flow_error *err)
17595 {
17596         struct mlx5_priv *priv = dev->data->dev_private;
17597
17598         RTE_SET_USED(conf);
17599         switch (action->type) {
17600         case RTE_FLOW_ACTION_TYPE_RSS:
17601                 /*
17602                  * priv->obj_ops is set according to driver capabilities.
17603                  * When DevX capabilities are sufficient, it is set to
17604                  * devx_obj_ops; otherwise, it is set to ibv_obj_ops.
17605                  * ibv_obj_ops doesn't support the ind_table_modify
17606                  * operation, so in that case the indirect RSS action
17607                  * can't be used.
17608                  */
17609                 if (priv->obj_ops.ind_table_modify == NULL)
17610                         return rte_flow_error_set
17611                                         (err, ENOTSUP,
17612                                          RTE_FLOW_ERROR_TYPE_ACTION,
17613                                          NULL,
17614                                          "Indirect RSS action not supported");
17615                 return mlx5_validate_action_rss(dev, action, err);
17616         case RTE_FLOW_ACTION_TYPE_AGE:
17617                 if (!priv->sh->aso_age_mng)
17618                         return rte_flow_error_set(err, ENOTSUP,
17619                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17620                                                 NULL,
17621                                                 "Indirect age action not supported");
17622                 return flow_dv_validate_action_age(0, action, dev, err);
17623         case RTE_FLOW_ACTION_TYPE_COUNT:
17624                 return flow_dv_validate_action_count(dev, true, 0, err);
17625         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
17626                 if (!priv->sh->ct_aso_en)
17627                         return rte_flow_error_set(err, ENOTSUP,
17628                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17629                                         "ASO CT is not supported");
17630                 return mlx5_validate_action_ct(dev, action->conf, err);
17631         default:
17632                 return rte_flow_error_set(err, ENOTSUP,
17633                                           RTE_FLOW_ERROR_TYPE_ACTION,
17634                                           NULL,
17635                                           "action type not supported");
17636         }
17637 }
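/*
 * A minimal caller-side sketch: this dispatcher backs the validation
 * step of rte_flow_action_handle_create() in the mlx5 PMD. The queue
 * array and port ID below are placeholders.
 *
 * @code
 * uint16_t queues[2] = { 0, 1 };
 * struct rte_flow_action_rss rss = {
 *	.types = RTE_ETH_RSS_IP,
 *	.queue = queues,
 *	.queue_num = 2,
 * };
 * struct rte_flow_action action = {
 *	.type = RTE_FLOW_ACTION_TYPE_RSS,
 *	.conf = &rss,
 * };
 * struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 * struct rte_flow_error error;
 * struct rte_flow_action_handle *handle =
 *	rte_flow_action_handle_create(port_id, &conf, &action, &error);
 * @endcode
 */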
17638
17639 /**
17640  * Check if the RSS configurations for colors of a meter policy match
17641  * each other, except the queues.
17642  *
17643  * @param[in] r1
17644  *   Pointer to the first RSS flow action.
17645  * @param[in] r2
17646  *   Pointer to the second RSS flow action.
17647  *
17648  * @return
17649  *   0 on match, 1 on conflict.
17650  */
17651 static inline int
17652 flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1,
17653                                const struct rte_flow_action_rss *r2)
17654 {
17655         if (r1 == NULL || r2 == NULL)
17656                 return 0;
17657         if (!(r1->level <= 1 && r2->level <= 1) &&
17658             !(r1->level > 1 && r2->level > 1))
17659                 return 1;
17660         if (r1->types != r2->types &&
17661             !((r1->types == 0 || r1->types == RTE_ETH_RSS_IP) &&
17662               (r2->types == 0 || r2->types == RTE_ETH_RSS_IP)))
17663                 return 1;
17664         if (r1->key || r2->key) {
17665                 const void *key1 = r1->key ? r1->key : rss_hash_default_key;
17666                 const void *key2 = r2->key ? r2->key : rss_hash_default_key;
17667
17668                 if (memcmp(key1, key2, MLX5_RSS_HASH_KEY_LEN))
17669                         return 1;
17670         }
17671         return 0;
17672 }
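/*
 * Example of the matching rule above: two per-color RSS configurations
 * that differ only in their queue lists, and whose hash types are
 * equivalent (0 and RTE_ETH_RSS_IP are treated alike), compare as a
 * match.
 *
 * @code
 * uint16_t qg[] = { 0, 1 }, qy[] = { 2, 3, 4, 5 };
 * struct rte_flow_action_rss green = {
 *	.types = RTE_ETH_RSS_IP, .queue = qg, .queue_num = 2,
 * };
 * struct rte_flow_action_rss yellow = {
 *	.types = 0, .queue = qy, .queue_num = 4,
 * };
 * // flow_dv_mtr_policy_rss_compare(&green, &yellow) returns 0 (match).
 * @endcode
 */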
17673
17674 /**
17675  * Validate the meter hierarchy chain for meter policy.
17676  *
17677  * @param[in] dev
17678  *   Pointer to the Ethernet device structure.
17679  * @param[in] meter_id
17680  *   Meter id.
17681  * @param[in] action_flags
17682  *   Holds the actions detected until now.
17683  * @param[out] is_rss
17684  *   Set to whether the terminating policy uses RSS.
17685  * @param[out] hierarchy_domain
17686  *   The domain bitmap for hierarchy policy.
17687  * @param[out] error
17688  *   Perform verbose error reporting if not NULL. Initialized in case of
17689  *   error only.
17690  *
17691  * @return
17692  *   0 on success, otherwise negative errno value with error set.
17693  */
17694 static int
17695 flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
17696                                   uint32_t meter_id,
17697                                   uint64_t action_flags,
17698                                   bool *is_rss,
17699                                   uint8_t *hierarchy_domain,
17700                                   struct rte_mtr_error *error)
17701 {
17702         struct mlx5_priv *priv = dev->data->dev_private;
17703         struct mlx5_flow_meter_info *fm;
17704         struct mlx5_flow_meter_policy *policy;
17705         uint8_t cnt = 1;
17706
17707         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
17708                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17709                 return -rte_mtr_error_set(error, EINVAL,
17710                                         RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
17711                                         NULL,
17712                                         "Multiple fate actions not supported.");
17713         *hierarchy_domain = 0;
17714         while (true) {
17715                 fm = mlx5_flow_meter_find(priv, meter_id, NULL);
17716                 if (!fm)
17717                         return -rte_mtr_error_set(error, EINVAL,
17718                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17719                                         "Meter not found in meter hierarchy.");
17720                 if (fm->def_policy)
17721                         return -rte_mtr_error_set(error, EINVAL,
17722                                         RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17723                         "Non-termination meter not supported in hierarchy.");
17724                 policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17725                 MLX5_ASSERT(policy);
17726                 /*
17727                  * Only inherit the supported domains of the first meter in
17728                  * hierarchy.
17729                  * One meter supports at least one domain.
17730                  */
17731                 if (!*hierarchy_domain) {
17732                         if (policy->transfer)
17733                                 *hierarchy_domain |=
17734                                                 MLX5_MTR_DOMAIN_TRANSFER_BIT;
17735                         if (policy->ingress)
17736                                 *hierarchy_domain |=
17737                                                 MLX5_MTR_DOMAIN_INGRESS_BIT;
17738                         if (policy->egress)
17739                                 *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
17740                 }
17741                 if (!policy->is_hierarchy) {
17742                         *is_rss = policy->is_rss;
17743                         break;
17744                 }
17745                 meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
17746                 if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
17747                         return -rte_mtr_error_set(error, EINVAL,
17748                                         RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
17749                                         "Exceeded the maximum number of meters in a hierarchy.");
17750         }
17751         return 0;
17752 }
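/*
 * Sketch of the hierarchy case validated above: a policy whose GREEN
 * action chains into another meter while RED drops. The meter ID is a
 * placeholder.
 *
 * @code
 * struct rte_flow_action_meter next = { .mtr_id = next_meter_id };
 * const struct rte_flow_action green_acts[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &next },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * const struct rte_flow_action red_acts[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 */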
17753
17754 /**
17755  * Validate meter policy actions.
17756  * Dispatcher for action type specific validation.
17757  *
17758  * @param[in] dev
17759  *   Pointer to the Ethernet device structure.
17760  * @param[in] actions
17761  *   Array of meter policy action lists to validate, one per color.
17762  * @param[in] attr
17763  *   Flow attributes used to determine the steering domain.
17764  * @param[out] error
17765  *   Perform verbose error reporting if not NULL. Initialized in case of
17766  *   error only.
17767  *
17768  * @return
17769  *   0 on success, otherwise negative errno value.
17770  */
17771 static int
17772 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
17773                         const struct rte_flow_action *actions[RTE_COLORS],
17774                         struct rte_flow_attr *attr,
17775                         bool *is_rss,
17776                         uint8_t *domain_bitmap,
17777                         uint8_t *policy_mode,
17778                         struct rte_mtr_error *error)
17779 {
17780         struct mlx5_priv *priv = dev->data->dev_private;
17781         struct mlx5_dev_config *dev_conf = &priv->config;
17782         const struct rte_flow_action *act;
17783         uint64_t action_flags[RTE_COLORS] = {0};
17784         int actions_n;
17785         int i, ret;
17786         struct rte_flow_error flow_err;
17787         uint8_t domain_color[RTE_COLORS] = {0};
17788         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
17789         uint8_t hierarchy_domain = 0;
17790         const struct rte_flow_action_meter *mtr;
17791         bool def_green = false;
17792         bool def_yellow = false;
17793         const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};
17794
17795         if (!priv->config.dv_esw_en)
17796                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17797         *domain_bitmap = def_domain;
17798         /* The red color can only support the DROP action. */
17799         if (!actions[RTE_COLOR_RED] ||
17800             actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
17801                 return -rte_mtr_error_set(error, ENOTSUP,
17802                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17803                                 NULL, "Red color only supports drop action.");
17804         /*
17805          * Check default policy actions:
17806          * Green / Yellow: no action, Red: drop action
17807          * Either G or Y will trigger default policy actions to be created.
17808          */
17809         if (!actions[RTE_COLOR_GREEN] ||
17810             actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)
17811                 def_green = true;
17812         if (!actions[RTE_COLOR_YELLOW] ||
17813             actions[RTE_COLOR_YELLOW]->type == RTE_FLOW_ACTION_TYPE_END)
17814                 def_yellow = true;
17815         if (def_green && def_yellow) {
17816                 *policy_mode = MLX5_MTR_POLICY_MODE_DEF;
17817                 return 0;
17818         } else if (!def_green && def_yellow) {
17819                 *policy_mode = MLX5_MTR_POLICY_MODE_OG;
17820         } else if (def_green && !def_yellow) {
17821                 *policy_mode = MLX5_MTR_POLICY_MODE_OY;
17822         } else {
17823                 *policy_mode = MLX5_MTR_POLICY_MODE_ALL;
17824         }
17825         /* Set to an empty string in case of NULL pointer access by the user. */
17826         flow_err.message = "";
17827         for (i = 0; i < RTE_COLORS; i++) {
17828                 act = actions[i];
17829                 for (action_flags[i] = 0, actions_n = 0;
17830                      act && act->type != RTE_FLOW_ACTION_TYPE_END;
17831                      act++) {
17832                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
17833                                 return -rte_mtr_error_set(error, ENOTSUP,
17834                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17835                                           NULL, "too many actions");
17836                         switch (act->type) {
17837                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
17838                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
17839                                 if (!priv->config.dv_esw_en)
17840                                         return -rte_mtr_error_set(error,
17841                                         ENOTSUP,
17842                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17843                                         NULL, "PORT action validation"
17844                                         " failed when E-Switch is disabled");
17845                                 ret = flow_dv_validate_action_port_id(dev,
17846                                                 action_flags[i],
17847                                                 act, attr, &flow_err);
17848                                 if (ret)
17849                                         return -rte_mtr_error_set(error,
17850                                         ENOTSUP,
17851                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17852                                         NULL, flow_err.message ?
17853                                         flow_err.message :
17854                                         "PORT action validation failed");
17855                                 ++actions_n;
17856                                 action_flags[i] |= MLX5_FLOW_ACTION_PORT_ID;
17857                                 break;
17858                         case RTE_FLOW_ACTION_TYPE_MARK:
17859                                 ret = flow_dv_validate_action_mark(dev, act,
17860                                                            action_flags[i],
17861                                                            attr, &flow_err);
17862                                 if (ret < 0)
17863                                         return -rte_mtr_error_set(error,
17864                                         ENOTSUP,
17865                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17866                                         NULL, flow_err.message ?
17867                                         flow_err.message :
17868                                         "Mark action validation failed");
17869                                 if (dev_conf->dv_xmeta_en !=
17870                                         MLX5_XMETA_MODE_LEGACY)
17871                                         return -rte_mtr_error_set(error,
17872                                         ENOTSUP,
17873                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17874                                         NULL, "Extended MARK action is "
17875                                         "not supported. Please try using "
17876                                         "the default policy for the meter.");
17877                                 action_flags[i] |= MLX5_FLOW_ACTION_MARK;
17878                                 ++actions_n;
17879                                 break;
17880                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
17881                                 ret = flow_dv_validate_action_set_tag(dev,
17882                                                         act, action_flags[i],
17883                                                         attr, &flow_err);
17884                                 if (ret)
17885                                         return -rte_mtr_error_set(error,
17886                                         ENOTSUP,
17887                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17888                                         NULL, flow_err.message ?
17889                                         flow_err.message :
17890                                         "Set tag action validation failed");
17891                                 action_flags[i] |= MLX5_FLOW_ACTION_SET_TAG;
17892                                 ++actions_n;
17893                                 break;
17894                         case RTE_FLOW_ACTION_TYPE_DROP:
17895                                 ret = mlx5_flow_validate_action_drop
17896                                         (action_flags[i], attr, &flow_err);
17897                                 if (ret < 0)
17898                                         return -rte_mtr_error_set(error,
17899                                         ENOTSUP,
17900                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17901                                         NULL, flow_err.message ?
17902                                         flow_err.message :
17903                                         "Drop action validation failed");
17904                                 action_flags[i] |= MLX5_FLOW_ACTION_DROP;
17905                                 ++actions_n;
17906                                 break;
17907                         case RTE_FLOW_ACTION_TYPE_QUEUE:
17908                                 /*
17909                                  * Check whether extensive
17910                                  * metadata feature is engaged.
17911                                  */
17912                                 if (dev_conf->dv_flow_en &&
17913                                     (dev_conf->dv_xmeta_en !=
17914                                      MLX5_XMETA_MODE_LEGACY) &&
17915                                     mlx5_flow_ext_mreg_supported(dev))
17916                                         return -rte_mtr_error_set(error,
17917                                           ENOTSUP,
17918                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17919                                           NULL, "Queue action with meta "
17920                                           "is not supported. Please try using "
17921                                           "the default policy for the meter.");
17922                                 ret = mlx5_flow_validate_action_queue(act,
17923                                                         action_flags[i], dev,
17924                                                         attr, &flow_err);
17925                                 if (ret < 0)
17926                                         return -rte_mtr_error_set(error,
17927                                           ENOTSUP,
17928                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17929                                           NULL, flow_err.message ?
17930                                           flow_err.message :
17931                                           "Queue action validation failed");
17932                                 action_flags[i] |= MLX5_FLOW_ACTION_QUEUE;
17933                                 ++actions_n;
17934                                 break;
17935                         case RTE_FLOW_ACTION_TYPE_RSS:
17936                                 if (dev_conf->dv_flow_en &&
17937                                     (dev_conf->dv_xmeta_en !=
17938                                      MLX5_XMETA_MODE_LEGACY) &&
17939                                     mlx5_flow_ext_mreg_supported(dev))
17940                                         return -rte_mtr_error_set(error,
17941                                           ENOTSUP,
17942                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17943                                           NULL, "RSS action with meta "
17944                                           "is not supported. Please try using "
17945                                           "the default policy for the meter.");
17946                                 ret = mlx5_validate_action_rss(dev, act,
17947                                                                &flow_err);
17948                                 if (ret < 0)
17949                                         return -rte_mtr_error_set(error,
17950                                           ENOTSUP,
17951                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17952                                           NULL, flow_err.message ?
17953                                           flow_err.message :
17954                                           "RSS action validation failed");
17955                                 action_flags[i] |= MLX5_FLOW_ACTION_RSS;
17956                                 ++actions_n;
17957                                 /* Either G or Y will set the RSS. */
17958                                 rss_color[i] = act->conf;
17959                                 break;
17960                         case RTE_FLOW_ACTION_TYPE_JUMP:
17961                                 ret = flow_dv_validate_action_jump(dev,
17962                                         NULL, act, action_flags[i],
17963                                         attr, true, &flow_err);
17964                                 if (ret)
17965                                         return -rte_mtr_error_set(error,
17966                                           ENOTSUP,
17967                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17968                                           NULL, flow_err.message ?
17969                                           flow_err.message :
17970                                           "Jump action validation failed");
17971                                 ++actions_n;
17972                                 action_flags[i] |= MLX5_FLOW_ACTION_JUMP;
17973                                 break;
17974                         /*
17975                          * Only the last meter in the hierarchy can support
17976                          * YELLOW color steering, so no other meter may appear
17977                          * in this meter policy's actions list.
17978                          */
17979                         case RTE_FLOW_ACTION_TYPE_METER:
17980                                 if (i != RTE_COLOR_GREEN)
17981                                         return -rte_mtr_error_set(error,
17982                                                 ENOTSUP,
17983                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17984                                                 NULL,
17985                                                 "Meter hierarchy only supports GREEN color.");
17986                                 if (*policy_mode != MLX5_MTR_POLICY_MODE_OG)
17987                                         return -rte_mtr_error_set(error,
17988                                                 ENOTSUP,
17989                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17990                                                 NULL,
17991                                                 "No yellow policy should be provided in meter hierarchy.");
17992                                 mtr = act->conf;
17993                                 ret = flow_dv_validate_policy_mtr_hierarchy(dev,
17994                                                         mtr->mtr_id,
17995                                                         action_flags[i],
17996                                                         is_rss,
17997                                                         &hierarchy_domain,
17998                                                         error);
17999                                 if (ret)
18000                                         return ret;
18001                                 ++actions_n;
18002                                 action_flags[i] |=
18003                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
18004                                 break;
18005                         default:
18006                                 return -rte_mtr_error_set(error, ENOTSUP,
18007                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18008                                         NULL,
18009                                         "Unsupported action in meter policy");
18010                         }
18011                 }
18012                 if (action_flags[i] & MLX5_FLOW_ACTION_PORT_ID) {
18013                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
18014                 } else if ((action_flags[i] &
18015                           (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
18016                           (action_flags[i] & MLX5_FLOW_ACTION_MARK)) {
18017                         /*
18018                          * Only MLX5_XMETA_MODE_LEGACY is supported here,
18019                          * so the MARK action is limited to the ingress domain.
18020                          */
18021                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
18022                 } else {
18023                         domain_color[i] = def_domain;
18024                         if (action_flags[i] &&
18025                             !(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18026                                 domain_color[i] &=
18027                                 ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
18028                 }
18029                 if (action_flags[i] &
18030                     MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
18031                         domain_color[i] &= hierarchy_domain;
18032                 /*
18033                  * Non-termination actions only support the NIC Tx domain.
18034                  * The adjustment should be skipped when there is no
18035                  * action or only END is provided. The default domain
18036                  * bit-mask is set to find the minimal intersection.
18037                  * The action flags check should also be skipped.
18038                  */
18039                 if ((def_green && i == RTE_COLOR_GREEN) ||
18040                     (def_yellow && i == RTE_COLOR_YELLOW))
18041                         continue;
18042                 /*
18043                  * Validate the drop action mutual exclusion
18044                  * with other actions. Drop action is mutually-exclusive
18045                  * with any other action, except for Count action.
18046                  */
18047                 if ((action_flags[i] & MLX5_FLOW_ACTION_DROP) &&
18048                     (action_flags[i] & ~MLX5_FLOW_ACTION_DROP)) {
18049                         return -rte_mtr_error_set(error, ENOTSUP,
18050                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18051                                 NULL, "Drop action is mutually-exclusive "
18052                                 "with any other action");
18053                 }
18054                 /* E-Switch has a few restrictions on using items and actions. */
18055                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
18056                         if (!mlx5_flow_ext_mreg_supported(dev) &&
18057                             action_flags[i] & MLX5_FLOW_ACTION_MARK)
18058                                 return -rte_mtr_error_set(error, ENOTSUP,
18059                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18060                                         NULL, "unsupported action MARK");
18061                         if (action_flags[i] & MLX5_FLOW_ACTION_QUEUE)
18062                                 return -rte_mtr_error_set(error, ENOTSUP,
18063                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18064                                         NULL, "unsupported action QUEUE");
18065                         if (action_flags[i] & MLX5_FLOW_ACTION_RSS)
18066                                 return -rte_mtr_error_set(error, ENOTSUP,
18067                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18068                                         NULL, "unsupported action RSS");
18069                         if (!(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18070                                 return -rte_mtr_error_set(error, ENOTSUP,
18071                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18072                                         NULL, "no fate action is found");
18073                 } else {
18074                         if (!(action_flags[i] & MLX5_FLOW_FATE_ACTIONS) &&
18075                             (domain_color[i] & MLX5_MTR_DOMAIN_INGRESS_BIT)) {
18076                                 if ((domain_color[i] &
18077                                      MLX5_MTR_DOMAIN_EGRESS_BIT))
18078                                         domain_color[i] =
18079                                                 MLX5_MTR_DOMAIN_EGRESS_BIT;
18080                                 else
18081                                         return -rte_mtr_error_set(error,
18082                                                 ENOTSUP,
18083                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18084                                                 NULL,
18085                                                 "no fate action is found");
18086                         }
18087                 }
18088         }
18089         /* If both colors have RSS, the attributes should be the same. */
18090         if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN],
18091                                            rss_color[RTE_COLOR_YELLOW]))
18092                 return -rte_mtr_error_set(error, EINVAL,
18093                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18094                                           NULL, "policy RSS attr conflict");
18095         if (rss_color[RTE_COLOR_GREEN] || rss_color[RTE_COLOR_YELLOW])
18096                 *is_rss = true;
18097         /* "domain_color[C]" is non-zero for each color, default is ALL. */
18098         if (!def_green && !def_yellow &&
18099             domain_color[RTE_COLOR_GREEN] != domain_color[RTE_COLOR_YELLOW] &&
18100             !(action_flags[RTE_COLOR_GREEN] & MLX5_FLOW_ACTION_DROP) &&
18101             !(action_flags[RTE_COLOR_YELLOW] & MLX5_FLOW_ACTION_DROP))
18102                 return -rte_mtr_error_set(error, EINVAL,
18103                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18104                                           NULL, "policy domains conflict");
18105         /*
18106          * At least one color policy is listed in the actions; the supported
18107          * domains are the intersection of the per-color domains.
18108          */
18109         *domain_bitmap = domain_color[RTE_COLOR_GREEN] &
18110                          domain_color[RTE_COLOR_YELLOW];
18111         return 0;
18112 }
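/*
 * Sketch of how the per-color action lists validated above are built by
 * an application through the rte_mtr API (green_acts/red_acts as in the
 * earlier hierarchy sketch, IDs are placeholders; RED must carry the
 * DROP action, as enforced above):
 *
 * @code
 * struct rte_mtr_error mtr_err;
 * struct rte_mtr_meter_policy_params params = {
 *	.actions = {
 *		[RTE_COLOR_GREEN] = green_acts,
 *		[RTE_COLOR_YELLOW] = NULL, // default: no action
 *		[RTE_COLOR_RED] = red_acts, // DROP + END
 *	},
 * };
 * ret = rte_mtr_meter_policy_add(port_id, policy_id, &params, &mtr_err);
 * @endcode
 */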
18113
18114 static int
18115 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
18116 {
18117         struct mlx5_priv *priv = dev->data->dev_private;
18118         int ret = 0;
18119
18120         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
18121                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
18122                                                 flags);
18123                 if (ret != 0)
18124                         return ret;
18125         }
18126         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
18127                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
18128                 if (ret != 0)
18129                         return ret;
18130         }
18131         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
18132                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
18133                 if (ret != 0)
18134                         return ret;
18135         }
18136         return 0;
18137 }
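/*
 * This hook backs the PMD-specific rte_pmd_mlx5_sync_flow() API
 * (declared in rte_pmd_mlx5.h, included at the top of this file).
 * A minimal usage sketch:
 *
 * @code
 * int ret = rte_pmd_mlx5_sync_flow(port_id,
 *				    MLX5_DOMAIN_BIT_NIC_RX |
 *				    MLX5_DOMAIN_BIT_NIC_TX |
 *				    MLX5_DOMAIN_BIT_FDB);
 * @endcode
 */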
18138
18139 /**
18140  * Discover the number of available flow priorities
18141  * by trying to create a flow with the highest priority value
18142  * for each possible number.
18143  *
18144  * @param[in] dev
18145  *   Ethernet device.
18146  * @param[in] vprio
18147  *   List of candidate numbers of available priorities to probe.
18148  * @param[in] vprio_n
18149  *   Size of @p vprio array.
18150  * @return
18151  *   On success, number of available flow priorities.
18152  *   On failure, a negative errno-style code and rte_errno is set.
18153  */
18154 static int
18155 flow_dv_discover_priorities(struct rte_eth_dev *dev,
18156                             const uint16_t *vprio, int vprio_n)
18157 {
18158         struct mlx5_priv *priv = dev->data->dev_private;
18159         struct mlx5_indexed_pool *pool = priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW];
18160         struct rte_flow_item_eth eth;
18161         struct rte_flow_item item = {
18162                 .type = RTE_FLOW_ITEM_TYPE_ETH,
18163                 .spec = &eth,
18164                 .mask = &eth,
18165         };
18166         struct mlx5_flow_dv_matcher matcher = {
18167                 .mask = {
18168                         .size = sizeof(matcher.mask.buf),
18169                 },
18170         };
18171         union mlx5_flow_tbl_key tbl_key;
18172         struct mlx5_flow flow;
18173         void *action;
18174         struct rte_flow_error error;
18175         uint8_t misc_mask;
18176         int i, err, ret = -ENOTSUP;
18177
18178         /*
18179          * Prepare a flow with a catch-all pattern and a drop action.
18180          * Use drop queue, because shared drop action may be unavailable.
18181          */
18182         action = priv->drop_queue.hrxq->action;
18183         if (action == NULL) {
18184                 DRV_LOG(ERR, "Priority discovery requires a drop action");
18185                 rte_errno = ENOTSUP;
18186                 return -rte_errno;
18187         }
18188         memset(&flow, 0, sizeof(flow));
18189         flow.handle = mlx5_ipool_zmalloc(pool, &flow.handle_idx);
18190         if (flow.handle == NULL) {
18191                 DRV_LOG(ERR, "Cannot create flow handle");
18192                 rte_errno = ENOMEM;
18193                 return -rte_errno;
18194         }
18195         flow.ingress = true;
18196         flow.dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
18197         flow.dv.actions[0] = action;
18198         flow.dv.actions_n = 1;
18199         memset(&eth, 0, sizeof(eth));
18200         flow_dv_translate_item_eth(matcher.mask.buf, flow.dv.value.buf,
18201                                    &item, /* inner */ false, /* group */ 0);
18202         matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);
18203         for (i = 0; i < vprio_n; i++) {
18204                 /* Configure the next proposed maximum priority. */
18205                 matcher.priority = vprio[i] - 1;
18206                 memset(&tbl_key, 0, sizeof(tbl_key));
18207                 err = flow_dv_matcher_register(dev, &matcher, &tbl_key, &flow,
18208                                                /* tunnel */ NULL,
18209                                                /* group */ 0,
18210                                                &error);
18211                 if (err != 0) {
18212                         /* This action is pure SW and must always succeed. */
18213                         DRV_LOG(ERR, "Cannot register matcher");
18214                         ret = -rte_errno;
18215                         break;
18216                 }
18217                 /* Try to apply the flow to HW. */
18218                 misc_mask = flow_dv_matcher_enable(flow.dv.value.buf);
18219                 __flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask);
18220                 err = mlx5_flow_os_create_flow
18221                                 (flow.handle->dvh.matcher->matcher_object,
18222                                  (void *)&flow.dv.value, flow.dv.actions_n,
18223                                  flow.dv.actions, &flow.handle->drv_flow);
18224                 if (err == 0) {
18225                         claim_zero(mlx5_flow_os_destroy_flow
18226                                                 (flow.handle->drv_flow));
18227                         flow.handle->drv_flow = NULL;
18228                 }
18229                 claim_zero(flow_dv_matcher_release(dev, flow.handle));
18230                 if (err != 0)
18231                         break;
18232                 ret = vprio[i];
18233         }
18234         mlx5_ipool_free(pool, flow.handle_idx);
18235         /* Set rte_errno if no expected priority value matched. */
18236         if (ret < 0)
18237                 rte_errno = -ret;
18238         return ret;
18239 }
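/*
 * Caller-side sketch of the probing above. The candidate list {8, 16}
 * mirrors the driver's usual probe values but is treated here as an
 * assumption, and the storage field name is hypothetical:
 *
 * @code
 * static const uint16_t vprio[] = { 8, 16 };
 * int n = flow_dv_discover_priorities(dev, vprio, RTE_DIM(vprio));
 *
 * if (n < 0)
 *	return n; // rte_errno is already set
 * priv->sh->flow_max_priority = n; // hypothetical field name
 * @endcode
 */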
18240
18241 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
18242         .validate = flow_dv_validate,
18243         .prepare = flow_dv_prepare,
18244         .translate = flow_dv_translate,
18245         .apply = flow_dv_apply,
18246         .remove = flow_dv_remove,
18247         .destroy = flow_dv_destroy,
18248         .query = flow_dv_query,
18249         .create_mtr_tbls = flow_dv_create_mtr_tbls,
18250         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
18251         .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
18252         .create_meter = flow_dv_mtr_alloc,
18253         .free_meter = flow_dv_aso_mtr_release_to_pool,
18254         .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
18255         .create_mtr_acts = flow_dv_create_mtr_policy_acts,
18256         .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
18257         .create_policy_rules = flow_dv_create_policy_rules,
18258         .destroy_policy_rules = flow_dv_destroy_policy_rules,
18259         .create_def_policy = flow_dv_create_def_policy,
18260         .destroy_def_policy = flow_dv_destroy_def_policy,
18261         .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
18262         .meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
18263         .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
18264         .counter_alloc = flow_dv_counter_allocate,
18265         .counter_free = flow_dv_counter_free,
18266         .counter_query = flow_dv_counter_query,
18267         .get_aged_flows = flow_dv_get_aged_flows,
18268         .action_validate = flow_dv_action_validate,
18269         .action_create = flow_dv_action_create,
18270         .action_destroy = flow_dv_action_destroy,
18271         .action_update = flow_dv_action_update,
18272         .action_query = flow_dv_action_query,
18273         .sync_domain = flow_dv_sync_domain,
18274         .discover_priorities = flow_dv_discover_priorities,
18275         .item_create = flow_dv_item_create,
18276         .item_release = flow_dv_item_release,
18277 };
18278
18279 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
18280