net/mlx5: set flow error for hash list create
drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>
#include <rte_tailq.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

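/*
 * Flow attributes deduced from the flow items. The 'attr' overlay
 * allows all flags to be cleared in one assignment when a tunnel
 * item is met and the outer-layer knowledge must be reset.
 */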
union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

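/*
 * Return the E-Switch manager vport id. On BlueField NICs the
 * E-Switch is managed by the embedded CPU physical function
 * (vport 0xfffe); otherwise the host PF (vport 0) is the manager.
 */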
static int16_t
flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (priv->pci_dev == NULL)
                return 0;
        switch (priv->pci_dev->id.device_id) {
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
                return (int16_t)0xfffe;
        default:
                return 0;
        }
}

/**
 * Initialize the flow attributes structure according to the flow items'
 * types.
 *
 * flow_dv_validate() rejects multiple L3/L4 layers except in tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is the suffix
         * flow and the layer flags were set by the prefix flow. Use the
         * layer flags from the prefix flow, as the suffix flow may not
         * carry the user-defined items after the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/*
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static inline int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

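/*
 * Each MAC address is split across two modification registers: a
 * 4-byte high half (bits 47..16) and a 2-byte low half (bits 15..0).
 * Offsets are bytes from the start of the Ethernet header.
 */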
struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

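/*
 * 128-bit IPv6 addresses are modified as four 32-bit words, most
 * significant word first; offsets are bytes from the start of the
 * IPv6 header.
 */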
struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

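/*
 * Mark an IPv4/IPv6 item as an IP-in-IP or IPv6-in-IP tunnel when the
 * (masked) next-protocol value says so, updating the item flags and
 * the tunnel indicator accordingly.
 */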
static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

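/*
 * Lazily create the hash list shared through 'phl'. The first caller
 * creates the list and publishes it with an atomic compare-and-swap;
 * a caller that loses the race destroys its own list and returns the
 * winner's. On creation failure both the driver log and the rte_flow
 * error structure are populated, so the reason reaches the API caller.
 */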
static inline struct mlx5_hlist *
flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
                     const char *name, uint32_t size, bool direct_key,
                     bool lcores_share, void *ctx,
                     mlx5_list_create_cb cb_create,
                     mlx5_list_match_cb cb_match,
                     mlx5_list_remove_cb cb_remove,
                     mlx5_list_clone_cb cb_clone,
                     mlx5_list_clone_free_cb cb_clone_free,
                     struct rte_flow_error *error)
{
        struct mlx5_hlist *hl;
        struct mlx5_hlist *expected = NULL;
        char s[MLX5_NAME_SIZE];

        hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        if (likely(hl))
                return hl;
        snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
        hl = mlx5_hlist_create(s, size, direct_key, lcores_share,
                        ctx, cb_create, cb_match, cb_remove, cb_clone,
                        cb_clone_free);
        if (!hl) {
                DRV_LOG(ERR, "%s hash creation failed", name);
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate resource memory");
                return NULL;
        }
        if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
                                         __ATOMIC_SEQ_CST,
                                         __ATOMIC_SEQ_CST)) {
                mlx5_hlist_destroy(hl);
                hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        }
        return hl;
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
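 *   Example: data = {0x12, 0x34, 0x56} with size = 3 yields 0x123456.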
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}

/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by the provided field
 * description and the item mask. Data bit offset and width of each
 * action are determined by the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t carry_b = 0;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                uint32_t size_b;
                uint32_t off_b;
                uint32_t mask;
                uint32_t data;
                bool next_field = true;
                bool next_dcopy = true;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask) + carry_b;
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
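                /*
                 * Example: mask = 0x00fff000 (carry_b = 0) gives
                 * off_b = 12 and size_b = 32 - 12 - 8 = 12 bits.
                 */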
                MLX5_ASSERT(size_b);
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
                                0 : size_b,
                };
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        /*
                         * Destination field overflow. Copy leftovers of
                         * a source field to the next destination field.
                         */
                        carry_b = 0;
                        if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
                            dcopy->size != 0) {
                                actions[i].length =
                                        dcopy->size * CHAR_BIT - dcopy->offset;
                                carry_b = actions[i].length;
                                next_field = false;
                        }
                        /*
                         * Not enough bits in a source field to fill a
                         * destination field. Switch to the next source.
                         */
                        if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
                            (size_b == field->size * CHAR_BIT - off_b)) {
                                actions[i].length =
                                        field->size * CHAR_BIT - off_b;
                                dcopy->offset += actions[i].length;
                                next_dcopy = false;
                        }
                        if (next_dcopy)
                                ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (next_field)
                        ++field;
                ++i;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using increments,
                 * add UINT32_MAX X times to Y; each addition of
                 * UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
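        /* Modulo 2^32: X * (2^32 - 1) == -X, i.e. two's complement. */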
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using increments,
                 * add UINT32_MAX X times to Y; each addition of
                 * UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

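/* Map flow metadata register ids to PRM modify-header field ids. */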
static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
                .offset = conf->offset,
                .length = conf->length,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->sh->config.dv_xmeta_en !=
                            MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        mask = rte_cpu_to_be_32(reg_c0 >> reg_dst.offset);
                } else {
                        reg_dst.offset = 0;
                        mask = rte_cpu_to_be_32(reg_c0);
                }
        }
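        /*
         * Example: with reg_c0 = 0xffff0000 a copy into REG_C_0 lands at
         * destination offset 16 with mask 0x0000ffff, keeping the copy
         * inside the bits the kernel reported as usable.
         */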
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata mode only and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t mask = rte_cpu_to_be_32(conf->mask);
        uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1375         /*
1376          * Even though the DSCP bit offset in IPv6 is not byte aligned,
1377          * rdma-core only accepts the DSCP value byte aligned in bits
1378          * 0-5, the same layout as IPv4, so there is no need to shift
1379          * the bits here: rdma-core expects the byte-aligned value.
1380          */
1381         ipv6.hdr.vtc_flow = conf->dscp;
1382         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1383         item.spec = &ipv6;
1384         item.mask = &ipv6_mask;
1385         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1386                                              MLX5_MODIFICATION_TYPE_SET, error);
1387 }
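/*
 * The IPv6 arithmetic mirrors the IPv4 case: RTE_IPV6_HDR_DSCP_MASK is
 * 0xfc << 20 == 0x0fc00000 (DSCP sits in bits 22-27 of vtc_flow), so the
 * right shift by 22 aligns it to 0x3f, the byte-aligned layout rdma-core
 * expects per the comment above.
 */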
1388
1389 static int
1390 mlx5_flow_item_field_width(struct rte_eth_dev *dev,
1391                            enum rte_flow_field_id field, int inherit,
1392                            const struct rte_flow_attr *attr,
1393                            struct rte_flow_error *error)
1394 {
1395         struct mlx5_priv *priv = dev->data->dev_private;
1396
1397         switch (field) {
1398         case RTE_FLOW_FIELD_START:
1399                 return 32;
1400         case RTE_FLOW_FIELD_MAC_DST:
1401         case RTE_FLOW_FIELD_MAC_SRC:
1402                 return 48;
1403         case RTE_FLOW_FIELD_VLAN_TYPE:
1404                 return 16;
1405         case RTE_FLOW_FIELD_VLAN_ID:
1406                 return 12;
1407         case RTE_FLOW_FIELD_MAC_TYPE:
1408                 return 16;
1409         case RTE_FLOW_FIELD_IPV4_DSCP:
1410                 return 6;
1411         case RTE_FLOW_FIELD_IPV4_TTL:
1412                 return 8;
1413         case RTE_FLOW_FIELD_IPV4_SRC:
1414         case RTE_FLOW_FIELD_IPV4_DST:
1415                 return 32;
1416         case RTE_FLOW_FIELD_IPV6_DSCP:
1417                 return 6;
1418         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1419                 return 8;
1420         case RTE_FLOW_FIELD_IPV6_SRC:
1421         case RTE_FLOW_FIELD_IPV6_DST:
1422                 return 128;
1423         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1424         case RTE_FLOW_FIELD_TCP_PORT_DST:
1425                 return 16;
1426         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1427         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1428                 return 32;
1429         case RTE_FLOW_FIELD_TCP_FLAGS:
1430                 return 9;
1431         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1432         case RTE_FLOW_FIELD_UDP_PORT_DST:
1433                 return 16;
1434         case RTE_FLOW_FIELD_VXLAN_VNI:
1435         case RTE_FLOW_FIELD_GENEVE_VNI:
1436                 return 24;
1437         case RTE_FLOW_FIELD_GTP_TEID:
1438         case RTE_FLOW_FIELD_TAG:
1439                 return 32;
1440         case RTE_FLOW_FIELD_MARK:
1441                 return __builtin_popcount(priv->sh->dv_mark_mask);
1442         case RTE_FLOW_FIELD_META:
1443                 return (flow_dv_get_metadata_reg(dev, attr, error) == REG_C_0) ?
1444                         __builtin_popcount(priv->sh->dv_meta_mask) : 32;
1445         case RTE_FLOW_FIELD_POINTER:
1446         case RTE_FLOW_FIELD_VALUE:
1447                 return inherit < 0 ? 0 : inherit;
1448         default:
1449                 MLX5_ASSERT(false);
1450         }
1451         return 0;
1452 }
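/*
 * Example of the runtime-dependent widths above (the mask value is an
 * assumption for illustration): with dv_mark_mask == 0x00ffffff the MARK
 * field is __builtin_popcount(0x00ffffff) == 24 bits wide, and META is a
 * full 32 bits unless it is backed by REG_C_0, where the width is the
 * popcount of dv_meta_mask, i.e. only the bits left over by other features.
 */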
1453
1454 static void
1455 mlx5_flow_field_id_to_modify_info
1456                 (const struct rte_flow_action_modify_data *data,
1457                  struct field_modify_info *info, uint32_t *mask,
1458                  uint32_t width, struct rte_eth_dev *dev,
1459                  const struct rte_flow_attr *attr, struct rte_flow_error *error)
1460 {
1461         struct mlx5_priv *priv = dev->data->dev_private;
1462         uint32_t idx = 0;
1463         uint32_t off = 0;
1464
1465         switch (data->field) {
1466         case RTE_FLOW_FIELD_START:
1467                 /* not supported yet */
1468                 MLX5_ASSERT(false);
1469                 break;
1470         case RTE_FLOW_FIELD_MAC_DST:
1471                 off = data->offset > 16 ? data->offset - 16 : 0;
1472                 if (mask) {
1473                         if (data->offset < 16) {
1474                                 info[idx] = (struct field_modify_info){2, 4,
1475                                                 MLX5_MODI_OUT_DMAC_15_0};
1476                                 if (width < 16) {
1477                                         mask[1] = rte_cpu_to_be_16(0xffff >>
1478                                                                  (16 - width));
1479                                         width = 0;
1480                                 } else {
1481                                         mask[1] = RTE_BE16(0xffff);
1482                                         width -= 16;
1483                                 }
1484                                 if (!width)
1485                                         break;
1486                                 ++idx;
1487                         }
1488                         info[idx] = (struct field_modify_info){4, 0,
1489                                                 MLX5_MODI_OUT_DMAC_47_16};
1490                         mask[0] = rte_cpu_to_be_32((0xffffffff >>
1491                                                     (32 - width)) << off);
1492                 } else {
1493                         if (data->offset < 16)
1494                                 info[idx++] = (struct field_modify_info){2, 0,
1495                                                 MLX5_MODI_OUT_DMAC_15_0};
1496                         info[idx] = (struct field_modify_info){4, off,
1497                                                 MLX5_MODI_OUT_DMAC_47_16};
1498                 }
1499                 break;
1500         case RTE_FLOW_FIELD_MAC_SRC:
1501                 off = data->offset > 16 ? data->offset - 16 : 0;
1502                 if (mask) {
1503                         if (data->offset < 16) {
1504                                 info[idx] = (struct field_modify_info){2, 4,
1505                                                 MLX5_MODI_OUT_SMAC_15_0};
1506                                 if (width < 16) {
1507                                         mask[1] = rte_cpu_to_be_16(0xffff >>
1508                                                                  (16 - width));
1509                                         width = 0;
1510                                 } else {
1511                                         mask[1] = RTE_BE16(0xffff);
1512                                         width -= 16;
1513                                 }
1514                                 if (!width)
1515                                         break;
1516                                 ++idx;
1517                         }
1518                         info[idx] = (struct field_modify_info){4, 0,
1519                                                 MLX5_MODI_OUT_SMAC_47_16};
1520                         mask[0] = rte_cpu_to_be_32((0xffffffff >>
1521                                                     (32 - width)) << off);
1522                 } else {
1523                         if (data->offset < 16)
1524                                 info[idx++] = (struct field_modify_info){2, 0,
1525                                                 MLX5_MODI_OUT_SMAC_15_0};
1526                         info[idx] = (struct field_modify_info){4, off,
1527                                                 MLX5_MODI_OUT_SMAC_47_16};
1528                 }
1529                 break;
1530         case RTE_FLOW_FIELD_VLAN_TYPE:
1531                 /* not supported yet */
1532                 break;
1533         case RTE_FLOW_FIELD_VLAN_ID:
1534                 info[idx] = (struct field_modify_info){2, 0,
1535                                         MLX5_MODI_OUT_FIRST_VID};
1536                 if (mask)
1537                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1538                 break;
1539         case RTE_FLOW_FIELD_MAC_TYPE:
1540                 info[idx] = (struct field_modify_info){2, 0,
1541                                         MLX5_MODI_OUT_ETHERTYPE};
1542                 if (mask)
1543                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1544                 break;
1545         case RTE_FLOW_FIELD_IPV4_DSCP:
1546                 info[idx] = (struct field_modify_info){1, 0,
1547                                         MLX5_MODI_OUT_IP_DSCP};
1548                 if (mask)
1549                         mask[idx] = 0x3f >> (6 - width);
1550                 break;
1551         case RTE_FLOW_FIELD_IPV4_TTL:
1552                 info[idx] = (struct field_modify_info){1, 0,
1553                                         MLX5_MODI_OUT_IPV4_TTL};
1554                 if (mask)
1555                         mask[idx] = 0xff >> (8 - width);
1556                 break;
1557         case RTE_FLOW_FIELD_IPV4_SRC:
1558                 info[idx] = (struct field_modify_info){4, 0,
1559                                         MLX5_MODI_OUT_SIPV4};
1560                 if (mask)
1561                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1562                                                      (32 - width));
1563                 break;
1564         case RTE_FLOW_FIELD_IPV4_DST:
1565                 info[idx] = (struct field_modify_info){4, 0,
1566                                         MLX5_MODI_OUT_DIPV4};
1567                 if (mask)
1568                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1569                                                      (32 - width));
1570                 break;
1571         case RTE_FLOW_FIELD_IPV6_DSCP:
1572                 info[idx] = (struct field_modify_info){1, 0,
1573                                         MLX5_MODI_OUT_IP_DSCP};
1574                 if (mask)
1575                         mask[idx] = 0x3f >> (6 - width);
1576                 break;
1577         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1578                 info[idx] = (struct field_modify_info){1, 0,
1579                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1580                 if (mask)
1581                         mask[idx] = 0xff >> (8 - width);
1582                 break;
1583         case RTE_FLOW_FIELD_IPV6_SRC:
1584                 if (mask) {
1585                         if (data->offset < 32) {
1586                                 info[idx] = (struct field_modify_info){4, 12,
1587                                                 MLX5_MODI_OUT_SIPV6_31_0};
1588                                 if (width < 32) {
1589                                         mask[3] =
1590                                                 rte_cpu_to_be_32(0xffffffff >>
1591                                                                  (32 - width));
1592                                         width = 0;
1593                                 } else {
1594                                         mask[3] = RTE_BE32(0xffffffff);
1595                                         width -= 32;
1596                                 }
1597                                 if (!width)
1598                                         break;
1599                                 ++idx;
1600                         }
1601                         if (data->offset < 64) {
1602                                 info[idx] = (struct field_modify_info){4, 8,
1603                                                 MLX5_MODI_OUT_SIPV6_63_32};
1604                                 if (width < 32) {
1605                                         mask[2] =
1606                                                 rte_cpu_to_be_32(0xffffffff >>
1607                                                                  (32 - width));
1608                                         width = 0;
1609                                 } else {
1610                                         mask[2] = RTE_BE32(0xffffffff);
1611                                         width -= 32;
1612                                 }
1613                                 if (!width)
1614                                         break;
1615                                 ++idx;
1616                         }
1617                         if (data->offset < 96) {
1618                                 info[idx] = (struct field_modify_info){4, 4,
1619                                                 MLX5_MODI_OUT_SIPV6_95_64};
1620                                 if (width < 32) {
1621                                         mask[1] =
1622                                                 rte_cpu_to_be_32(0xffffffff >>
1623                                                                  (32 - width));
1624                                         width = 0;
1625                                 } else {
1626                                         mask[1] = RTE_BE32(0xffffffff);
1627                                         width -= 32;
1628                                 }
1629                                 if (!width)
1630                                         break;
1631                                 ++idx;
1632                         }
1633                         info[idx] = (struct field_modify_info){4, 0,
1634                                                 MLX5_MODI_OUT_SIPV6_127_96};
1635                         mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
1636                 } else {
1637                         if (data->offset < 32)
1638                                 info[idx++] = (struct field_modify_info){4, 0,
1639                                                 MLX5_MODI_OUT_SIPV6_31_0};
1640                         if (data->offset < 64)
1641                                 info[idx++] = (struct field_modify_info){4, 0,
1642                                                 MLX5_MODI_OUT_SIPV6_63_32};
1643                         if (data->offset < 96)
1644                                 info[idx++] = (struct field_modify_info){4, 0,
1645                                                 MLX5_MODI_OUT_SIPV6_95_64};
1646                         if (data->offset < 128)
1647                                 info[idx++] = (struct field_modify_info){4, 0,
1648                                                 MLX5_MODI_OUT_SIPV6_127_96};
1649                 }
1650                 break;
1651         case RTE_FLOW_FIELD_IPV6_DST:
1652                 if (mask) {
1653                         if (data->offset < 32) {
1654                                 info[idx] = (struct field_modify_info){4, 12,
1655                                                 MLX5_MODI_OUT_DIPV6_31_0};
1656                                 if (width < 32) {
1657                                         mask[3] =
1658                                                 rte_cpu_to_be_32(0xffffffff >>
1659                                                                  (32 - width));
1660                                         width = 0;
1661                                 } else {
1662                                         mask[3] = RTE_BE32(0xffffffff);
1663                                         width -= 32;
1664                                 }
1665                                 if (!width)
1666                                         break;
1667                                 ++idx;
1668                         }
1669                         if (data->offset < 64) {
1670                                 info[idx] = (struct field_modify_info){4, 8,
1671                                                 MLX5_MODI_OUT_DIPV6_63_32};
1672                                 if (width < 32) {
1673                                         mask[2] =
1674                                                 rte_cpu_to_be_32(0xffffffff >>
1675                                                                  (32 - width));
1676                                         width = 0;
1677                                 } else {
1678                                         mask[2] = RTE_BE32(0xffffffff);
1679                                         width -= 32;
1680                                 }
1681                                 if (!width)
1682                                         break;
1683                                 ++idx;
1684                         }
1685                         if (data->offset < 96) {
1686                                 info[idx] = (struct field_modify_info){4, 4,
1687                                                 MLX5_MODI_OUT_DIPV6_95_64};
1688                                 if (width < 32) {
1689                                         mask[1] =
1690                                                 rte_cpu_to_be_32(0xffffffff >>
1691                                                                  (32 - width));
1692                                         width = 0;
1693                                 } else {
1694                                         mask[1] = RTE_BE32(0xffffffff);
1695                                         width -= 32;
1696                                 }
1697                                 if (!width)
1698                                         break;
1699                                 ++idx;
1700                         }
1701                         info[idx] = (struct field_modify_info){4, 0,
1702                                                 MLX5_MODI_OUT_DIPV6_127_96};
1703                         mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
1704                 } else {
1705                         if (data->offset < 32)
1706                                 info[idx++] = (struct field_modify_info){4, 0,
1707                                                 MLX5_MODI_OUT_DIPV6_31_0};
1708                         if (data->offset < 64)
1709                                 info[idx++] = (struct field_modify_info){4, 0,
1710                                                 MLX5_MODI_OUT_DIPV6_63_32};
1711                         if (data->offset < 96)
1712                                 info[idx++] = (struct field_modify_info){4, 0,
1713                                                 MLX5_MODI_OUT_DIPV6_95_64};
1714                         if (data->offset < 128)
1715                                 info[idx++] = (struct field_modify_info){4, 0,
1716                                                 MLX5_MODI_OUT_DIPV6_127_96};
1717                 }
1718                 break;
1719         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1720                 info[idx] = (struct field_modify_info){2, 0,
1721                                         MLX5_MODI_OUT_TCP_SPORT};
1722                 if (mask)
1723                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1724                 break;
1725         case RTE_FLOW_FIELD_TCP_PORT_DST:
1726                 info[idx] = (struct field_modify_info){2, 0,
1727                                         MLX5_MODI_OUT_TCP_DPORT};
1728                 if (mask)
1729                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1730                 break;
1731         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1732                 info[idx] = (struct field_modify_info){4, 0,
1733                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1734                 if (mask)
1735                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1736                                                      (32 - width));
1737                 break;
1738         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1739                 info[idx] = (struct field_modify_info){4, 0,
1740                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1741                 if (mask)
1742                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1743                                                      (32 - width));
1744                 break;
1745         case RTE_FLOW_FIELD_TCP_FLAGS:
1746                 info[idx] = (struct field_modify_info){2, 0,
1747                                         MLX5_MODI_OUT_TCP_FLAGS};
1748                 if (mask)
1749                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1750                 break;
1751         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1752                 info[idx] = (struct field_modify_info){2, 0,
1753                                         MLX5_MODI_OUT_UDP_SPORT};
1754                 if (mask)
1755                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1756                 break;
1757         case RTE_FLOW_FIELD_UDP_PORT_DST:
1758                 info[idx] = (struct field_modify_info){2, 0,
1759                                         MLX5_MODI_OUT_UDP_DPORT};
1760                 if (mask)
1761                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1762                 break;
1763         case RTE_FLOW_FIELD_VXLAN_VNI:
1764                 /* not supported yet */
1765                 break;
1766         case RTE_FLOW_FIELD_GENEVE_VNI:
1767                 /* not supported yet */
1768                 break;
1769         case RTE_FLOW_FIELD_GTP_TEID:
1770                 info[idx] = (struct field_modify_info){4, 0,
1771                                         MLX5_MODI_GTP_TEID};
1772                 if (mask)
1773                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1774                                                      (32 - width));
1775                 break;
1776         case RTE_FLOW_FIELD_TAG:
1777                 {
1778                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1779                                                    data->level, error);
1780                         if (reg < 0)
1781                                 return;
1782                         MLX5_ASSERT(reg != REG_NON);
1783                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1784                         info[idx] = (struct field_modify_info){4, 0,
1785                                                 reg_to_field[reg]};
1786                         if (mask)
1787                                 mask[idx] =
1788                                         rte_cpu_to_be_32(0xffffffff >>
1789                                                          (32 - width));
1790                 }
1791                 break;
1792         case RTE_FLOW_FIELD_MARK:
1793                 {
1794                         uint32_t mark_mask = priv->sh->dv_mark_mask;
1795                         uint32_t mark_count = __builtin_popcount(mark_mask);
1796                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1797                                                        0, error);
1798                         if (reg < 0)
1799                                 return;
1800                         MLX5_ASSERT(reg != REG_NON);
1801                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1802                         info[idx] = (struct field_modify_info){4, 0,
1803                                                 reg_to_field[reg]};
1804                         if (mask)
1805                                 mask[idx] = rte_cpu_to_be_32((mark_mask >>
1806                                          (mark_count - width)) & mark_mask);
1807                 }
1808                 break;
1809         case RTE_FLOW_FIELD_META:
1810                 {
1811                         uint32_t meta_mask = priv->sh->dv_meta_mask;
1812                         uint32_t meta_count = __builtin_popcount(meta_mask);
1813                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1814                         if (reg < 0)
1815                                 return;
1816                         MLX5_ASSERT(reg != REG_NON);
1817                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1818                         info[idx] = (struct field_modify_info){4, 0,
1819                                                 reg_to_field[reg]};
1820                         if (mask)
1821                                 mask[idx] = rte_cpu_to_be_32((meta_mask >>
1822                                         (meta_count - width)) & meta_mask);
1823                 }
1824                 break;
1825         case RTE_FLOW_FIELD_POINTER:
1826         case RTE_FLOW_FIELD_VALUE:
1827         default:
1828                 MLX5_ASSERT(false);
1829                 break;
1830         }
1831 }
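/*
 * Worked example for the MAC_DST branch above: modifying the whole address
 * (data->offset == 0, width == 48) first emits
 * {2, 4, MLX5_MODI_OUT_DMAC_15_0} with mask[1] = 0xffff (width drops to
 * 32), then {4, 0, MLX5_MODI_OUT_DMAC_47_16} with mask[0] = 0xffffffff:
 * the 48-bit field is split across the 16-bit and 32-bit DMAC registers,
 * with the mask array laid out big-endian across both entries.
 */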
1832
1833 /**
1834  * Convert modify_field action to DV specification.
1835  *
1836  * @param[in] dev
1837  *   Pointer to the rte_eth_dev structure.
1838  * @param[in,out] resource
1839  *   Pointer to the modify-header resource.
1840  * @param[in] action
1841  *   Pointer to action specification.
1842  * @param[in] attr
1843  *   Attributes of flow that includes this item.
1844  * @param[out] error
1845  *   Pointer to the error structure.
1846  *
1847  * @return
1848  *   0 on success, a negative errno value otherwise and rte_errno is set.
1849  */
1850 static int
1851 flow_dv_convert_action_modify_field
1852                         (struct rte_eth_dev *dev,
1853                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1854                          const struct rte_flow_action *action,
1855                          const struct rte_flow_attr *attr,
1856                          struct rte_flow_error *error)
1857 {
1858         const struct rte_flow_action_modify_field *conf =
1859                 (const struct rte_flow_action_modify_field *)(action->conf);
1860         struct rte_flow_item item = {
1861                 .spec = NULL,
1862                 .mask = NULL
1863         };
1864         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1865                                                                 {0, 0, 0} };
1866         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1867                                                                 {0, 0, 0} };
1868         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1869         uint32_t type, meta = 0;
1870
1871         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1872             conf->src.field == RTE_FLOW_FIELD_VALUE) {
1873                 type = MLX5_MODIFICATION_TYPE_SET;
1874                 /* For SET, fill the destination field (field) first. */
1875                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1876                                                   conf->width, dev,
1877                                                   attr, error);
1878                 item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
1879                                         (void *)(uintptr_t)conf->src.pvalue :
1880                                         (void *)(uintptr_t)&conf->src.value;
1881                 if (conf->dst.field == RTE_FLOW_FIELD_META) {
1882                         meta = *(const unaligned_uint32_t *)item.spec;
1883                         meta = rte_cpu_to_be_32(meta);
1884                         item.spec = &meta;
1885                 }
1886         } else {
1887                 type = MLX5_MODIFICATION_TYPE_COPY;
1888                 /* For COPY, fill the destination field (dcopy) without a mask. */
1889                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1890                                                   conf->width, dev,
1891                                                   attr, error);
1892                 /* Then construct the source field (field) with its mask. */
1893                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1894                                                   conf->width, dev,
1895                                                   attr, error);
1896         }
1897         item.mask = &mask;
1898         return flow_dv_convert_modify_action(&item,
1899                         field, dcopy, resource, type, error);
1900 }
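/*
 * A minimal application-side sketch of an action this routine converts
 * (illustrative only; the tag index 0 is an assumption): copy the full
 * 32-bit TCP sequence number into a TAG register. Since the source is
 * neither RTE_FLOW_FIELD_POINTER nor RTE_FLOW_FIELD_VALUE, the code above
 * selects MLX5_MODIFICATION_TYPE_COPY.
 */
static const struct rte_flow_action_modify_field example_copy_seq_to_tag = {
	.operation = RTE_FLOW_MODIFY_SET,
	.dst = {
		.field = RTE_FLOW_FIELD_TAG,
		.level = 0,	/* tag array index, assumed free here */
	},
	.src = {
		.field = RTE_FLOW_FIELD_TCP_SEQ_NUM,
	},
	.width = 32,	/* whole sequence number */
};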
1901
1902 /**
1903  * Validate MARK item.
1904  *
1905  * @param[in] dev
1906  *   Pointer to the rte_eth_dev structure.
1907  * @param[in] item
1908  *   Item specification.
1909  * @param[in] attr
1910  *   Attributes of flow that includes this item.
1911  * @param[out] error
1912  *   Pointer to error structure.
1913  *
1914  * @return
1915  *   0 on success, a negative errno value otherwise and rte_errno is set.
1916  */
1917 static int
1918 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1919                            const struct rte_flow_item *item,
1920                            const struct rte_flow_attr *attr __rte_unused,
1921                            struct rte_flow_error *error)
1922 {
1923         struct mlx5_priv *priv = dev->data->dev_private;
1924         struct mlx5_sh_config *config = &priv->sh->config;
1925         const struct rte_flow_item_mark *spec = item->spec;
1926         const struct rte_flow_item_mark *mask = item->mask;
1927         const struct rte_flow_item_mark nic_mask = {
1928                 .id = priv->sh->dv_mark_mask,
1929         };
1930         int ret;
1931
1932         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1933                 return rte_flow_error_set(error, ENOTSUP,
1934                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1935                                           "extended metadata feature"
1936                                           " isn't enabled");
1937         if (!mlx5_flow_ext_mreg_supported(dev))
1938                 return rte_flow_error_set(error, ENOTSUP,
1939                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1940                                           "extended metadata register"
1941                                           " isn't supported");
1942         if (!nic_mask.id)
1943                 return rte_flow_error_set(error, ENOTSUP,
1944                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1945                                           "extended metadata register"
1946                                           " isn't available");
1947         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1948         if (ret < 0)
1949                 return ret;
1950         if (!spec)
1951                 return rte_flow_error_set(error, EINVAL,
1952                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1953                                           item->spec,
1954                                           "data cannot be empty");
1955         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1956                 return rte_flow_error_set(error, EINVAL,
1957                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1958                                           &spec->id,
1959                                           "mark id exceeds the limit");
1960         if (!mask)
1961                 mask = &nic_mask;
1962         if (!mask->id)
1963                 return rte_flow_error_set(error, EINVAL,
1964                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1965                                         "mask cannot be zero");
1966
1967         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1968                                         (const uint8_t *)&nic_mask,
1969                                         sizeof(struct rte_flow_item_mark),
1970                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1971         if (ret < 0)
1972                 return ret;
1973         return 0;
1974 }
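/*
 * An item that passes the validation above, as a sketch (assumes
 * dv_xmeta_en != MLX5_XMETA_MODE_LEGACY and a non-zero dv_mark_mask):
 * match packets previously marked with identifier 0x17. A NULL mask is
 * fine, the function substitutes nic_mask built from dv_mark_mask.
 */
static const struct rte_flow_item_mark example_mark_spec = { .id = 0x17 };
static const struct rte_flow_item example_mark_item = {
	.type = RTE_FLOW_ITEM_TYPE_MARK,
	.spec = &example_mark_spec,
	.mask = NULL,
};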
1975
1976 /**
1977  * Validate META item.
1978  *
1979  * @param[in] dev
1980  *   Pointer to the rte_eth_dev structure.
1981  * @param[in] item
1982  *   Item specification.
1983  * @param[in] attr
1984  *   Attributes of flow that includes this item.
1985  * @param[out] error
1986  *   Pointer to error structure.
1987  *
1988  * @return
1989  *   0 on success, a negative errno value otherwise and rte_errno is set.
1990  */
1991 static int
1992 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1993                            const struct rte_flow_item *item,
1994                            const struct rte_flow_attr *attr,
1995                            struct rte_flow_error *error)
1996 {
1997         struct mlx5_priv *priv = dev->data->dev_private;
1998         struct mlx5_sh_config *config = &priv->sh->config;
1999         const struct rte_flow_item_meta *spec = item->spec;
2000         const struct rte_flow_item_meta *mask = item->mask;
2001         struct rte_flow_item_meta nic_mask = {
2002                 .data = UINT32_MAX
2003         };
2004         int reg;
2005         int ret;
2006
2007         if (!spec)
2008                 return rte_flow_error_set(error, EINVAL,
2009                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2010                                           item->spec,
2011                                           "data cannot be empty");
2012         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2013                 if (!mlx5_flow_ext_mreg_supported(dev))
2014                         return rte_flow_error_set(error, ENOTSUP,
2015                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2016                                           "extended metadata register"
2017                                           " isn't supported");
2018                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2019                 if (reg < 0)
2020                         return reg;
2021                 if (reg == REG_NON)
2022                         return rte_flow_error_set(error, ENOTSUP,
2023                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2024                                         "unavailable extended metadata register");
2025                 if (reg == REG_B)
2026                         return rte_flow_error_set(error, ENOTSUP,
2027                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2028                                           "match on reg_b "
2029                                           "isn't supported");
2030                 if (reg != REG_A)
2031                         nic_mask.data = priv->sh->dv_meta_mask;
2032         } else {
2033                 if (attr->transfer)
2034                         return rte_flow_error_set(error, ENOTSUP,
2035                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2036                                         "extended metadata feature "
2037                                         "should be enabled when "
2038                                         "meta item is requested "
2039                                         "with e-switch mode");
2040                 if (attr->ingress)
2041                         return rte_flow_error_set(error, ENOTSUP,
2042                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2043                                         "match on metadata for ingress "
2044                                         "is not supported in legacy "
2045                                         "metadata mode");
2046         }
2047         if (!mask)
2048                 mask = &rte_flow_item_meta_mask;
2049         if (!mask->data)
2050                 return rte_flow_error_set(error, EINVAL,
2051                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2052                                         "mask cannot be zero");
2053
2054         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2055                                         (const uint8_t *)&nic_mask,
2056                                         sizeof(struct rte_flow_item_meta),
2057                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2058         return ret;
2059 }
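/*
 * Note on the register selection above: in extended metadata mode the META
 * item may land in REG_A, REG_B or one of the REG_C registers depending on
 * dv_xmeta_en and the flow attributes; matching on REG_B is rejected, and
 * for anything other than REG_A the comparable bits are narrowed to
 * dv_meta_mask, hence the nic_mask.data adjustment.
 */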
2060
2061 /**
2062  * Validate TAG item.
2063  *
2064  * @param[in] dev
2065  *   Pointer to the rte_eth_dev structure.
2066  * @param[in] item
2067  *   Item specification.
2068  * @param[in] attr
2069  *   Attributes of flow that includes this item.
2070  * @param[out] error
2071  *   Pointer to error structure.
2072  *
2073  * @return
2074  *   0 on success, a negative errno value otherwise and rte_errno is set.
2075  */
2076 static int
2077 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2078                           const struct rte_flow_item *item,
2079                           const struct rte_flow_attr *attr __rte_unused,
2080                           struct rte_flow_error *error)
2081 {
2082         const struct rte_flow_item_tag *spec = item->spec;
2083         const struct rte_flow_item_tag *mask = item->mask;
2084         const struct rte_flow_item_tag nic_mask = {
2085                 .data = RTE_BE32(UINT32_MAX),
2086                 .index = 0xff,
2087         };
2088         int ret;
2089
2090         if (!mlx5_flow_ext_mreg_supported(dev))
2091                 return rte_flow_error_set(error, ENOTSUP,
2092                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2093                                           "extended metadata register"
2094                                           " isn't supported");
2095         if (!spec)
2096                 return rte_flow_error_set(error, EINVAL,
2097                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2098                                           item->spec,
2099                                           "data cannot be empty");
2100         if (!mask)
2101                 mask = &rte_flow_item_tag_mask;
2102         if (!mask->data)
2103                 return rte_flow_error_set(error, EINVAL,
2104                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2105                                         "mask cannot be zero");
2106
2107         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2108                                         (const uint8_t *)&nic_mask,
2109                                         sizeof(struct rte_flow_item_tag),
2110                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2111         if (ret < 0)
2112                 return ret;
2113         if (mask->index != 0xff)
2114                 return rte_flow_error_set(error, EINVAL,
2115                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2116                                           "partial mask for tag index"
2117                                           " is not supported");
2118         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2119         if (ret < 0)
2120                 return ret;
2121         MLX5_ASSERT(ret != REG_NON);
2122         return 0;
2123 }
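/*
 * Sketch of a TAG item accepted by the validation above (index 2 and the
 * data value are arbitrary example values): match the full 32-bit content
 * of application tag register 2. The index mask must stay 0xff; partial
 * index masks are rejected.
 */
static const struct rte_flow_item_tag example_tag_spec = {
	.data = 0xdeadbeef,
	.index = 2,
};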
2124
2125 /**
2126  * Validate port_id (vport) item.
2127  *
2128  * @param[in] dev
2129  *   Pointer to the rte_eth_dev structure.
2130  * @param[in] item
2131  *   Item specification.
2132  * @param[in] attr
2133  *   Attributes of flow that includes this item.
2134  * @param[in] item_flags
2135  *   Bit-fields that hold the items detected until now.
2136  * @param[out] error
2137  *   Pointer to error structure.
2138  *
2139  * @return
2140  *   0 on success, a negative errno value otherwise and rte_errno is set.
2141  */
2142 static int
2143 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2144                               const struct rte_flow_item *item,
2145                               const struct rte_flow_attr *attr,
2146                               uint64_t item_flags,
2147                               struct rte_flow_error *error)
2148 {
2149         const struct rte_flow_item_port_id *spec = item->spec;
2150         const struct rte_flow_item_port_id *mask = item->mask;
2151         const struct rte_flow_item_port_id switch_mask = {
2152                         .id = 0xffffffff,
2153         };
2154         struct mlx5_priv *esw_priv;
2155         struct mlx5_priv *dev_priv;
2156         int ret;
2157
2158         if (!attr->transfer)
2159                 return rte_flow_error_set(error, EINVAL,
2160                                           RTE_FLOW_ERROR_TYPE_ITEM,
2161                                           NULL,
2162                                           "match on port id is valid only"
2163                                           " when transfer flag is enabled");
2164         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2165                 return rte_flow_error_set(error, ENOTSUP,
2166                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2167                                           "multiple source ports are not"
2168                                           " supported");
2169         if (!mask)
2170                 mask = &switch_mask;
2171         if (mask->id != 0xffffffff)
2172                 return rte_flow_error_set(error, ENOTSUP,
2173                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2174                                            mask,
2175                                            "no support for partial mask on"
2176                                            " \"id\" field");
2177         ret = mlx5_flow_item_acceptable
2178                                 (item, (const uint8_t *)mask,
2179                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2180                                  sizeof(struct rte_flow_item_port_id),
2181                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2182         if (ret)
2183                 return ret;
2184         if (!spec)
2185                 return 0;
2186         if (spec->id == MLX5_PORT_ESW_MGR)
2187                 return 0;
2188         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2189         if (!esw_priv)
2190                 return rte_flow_error_set(error, rte_errno,
2191                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2192                                           "failed to obtain E-Switch info for"
2193                                           " port");
2194         dev_priv = mlx5_dev_to_eswitch_info(dev);
2195         if (!dev_priv)
2196                 return rte_flow_error_set(error, rte_errno,
2197                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2198                                           NULL,
2199                                           "failed to obtain E-Switch info");
2200         if (esw_priv->domain_id != dev_priv->domain_id)
2201                 return rte_flow_error_set(error, EINVAL,
2202                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2203                                           "cannot match on a port from a"
2204                                           " different E-Switch");
2205         return 0;
2206 }
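/*
 * Sketch of a matching item for the validation above (port id 1 is an
 * assumption): used in a transfer rule (attr->transfer == 1) to match
 * traffic entering the E-Switch from DPDK port 1. Only the full
 * 0xffffffff id mask is accepted.
 */
static const struct rte_flow_item_port_id example_port_spec = { .id = 1 };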
2207
2208 /**
2209  * Validate VLAN item.
2210  *
2211  * @param[in] item
2212  *   Item specification.
2213  * @param[in] item_flags
2214  *   Bit-fields that hold the items detected until now.
2215  * @param[in] dev
2216  *   Ethernet device flow is being created on.
2217  * @param[out] error
2218  *   Pointer to error structure.
2219  *
2220  * @return
2221  *   0 on success, a negative errno value otherwise and rte_errno is set.
2222  */
2223 static int
2224 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2225                            uint64_t item_flags,
2226                            struct rte_eth_dev *dev,
2227                            struct rte_flow_error *error)
2228 {
2229         const struct rte_flow_item_vlan *mask = item->mask;
2230         const struct rte_flow_item_vlan nic_mask = {
2231                 .tci = RTE_BE16(UINT16_MAX),
2232                 .inner_type = RTE_BE16(UINT16_MAX),
2233                 .has_more_vlan = 1,
2234         };
2235         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2236         int ret;
2237         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2238                                         MLX5_FLOW_LAYER_INNER_L4) :
2239                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2240                                         MLX5_FLOW_LAYER_OUTER_L4);
2241         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2242                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2243
2244         if (item_flags & vlanm)
2245                 return rte_flow_error_set(error, EINVAL,
2246                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2247                                           "multiple VLAN layers not supported");
2248         else if ((item_flags & l34m) != 0)
2249                 return rte_flow_error_set(error, EINVAL,
2250                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2251                                           "VLAN cannot follow L3/L4 layer");
2252         if (!mask)
2253                 mask = &rte_flow_item_vlan_mask;
2254         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2255                                         (const uint8_t *)&nic_mask,
2256                                         sizeof(struct rte_flow_item_vlan),
2257                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2258         if (ret)
2259                 return ret;
2260         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2261                 struct mlx5_priv *priv = dev->data->dev_private;
2262
2263                 if (priv->vmwa_context) {
2264                         /*
2265                          * A non-NULL context means we run in a virtual
2266                          * machine with SR-IOV enabled and have to create
2267                          * a VLAN interface so the hypervisor sets up the
2268                          * E-Switch vport context correctly. We avoid creating
2269                          * multiple VLAN interfaces, so a VLAN tag mask is not supported.
2270                          */
2271                         return rte_flow_error_set(error, EINVAL,
2272                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2273                                                   item,
2274                                                   "VLAN tag mask is not"
2275                                                   " supported in virtual"
2276                                                   " environment");
2277                 }
2278         }
2279         return 0;
2280 }
2281
2282 /*
2283  * GTP flags are contained in 1 byte of the format:
2284  * -------------------------------------------
2285  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2286  * |-----------------------------------------|
2287  * | value | Version | PT | Res | E | S | PN |
2288  * -------------------------------------------
2289  *
2290  * Matching is supported only for GTP flags E, S, PN.
2291  */
2292 #define MLX5_GTP_FLAGS_MASK     0x07
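/*
 * For example, a GTPv1 header with PT set and the E flag on carries
 * v_pt_rsv_flags == 0x34; masked with MLX5_GTP_FLAGS_MASK this leaves
 * 0x04, i.e. only the E bit participates in matching while version and
 * PT are ignored.
 */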
2293
2294 /**
2295  * Validate GTP item.
2296  *
2297  * @param[in] dev
2298  *   Pointer to the rte_eth_dev structure.
2299  * @param[in] item
2300  *   Item specification.
2301  * @param[in] item_flags
2302  *   Bit-fields that hold the items detected until now.
2303  * @param[out] error
2304  *   Pointer to error structure.
2305  *
2306  * @return
2307  *   0 on success, a negative errno value otherwise and rte_errno is set.
2308  */
2309 static int
2310 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2311                           const struct rte_flow_item *item,
2312                           uint64_t item_flags,
2313                           struct rte_flow_error *error)
2314 {
2315         struct mlx5_priv *priv = dev->data->dev_private;
2316         const struct rte_flow_item_gtp *spec = item->spec;
2317         const struct rte_flow_item_gtp *mask = item->mask;
2318         const struct rte_flow_item_gtp nic_mask = {
2319                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2320                 .msg_type = 0xff,
2321                 .teid = RTE_BE32(0xffffffff),
2322         };
2323
2324         if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_gtp)
2325                 return rte_flow_error_set(error, ENOTSUP,
2326                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2327                                           "GTP support is not enabled");
2328         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2329                 return rte_flow_error_set(error, ENOTSUP,
2330                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2331                                           "multiple tunnel layers not"
2332                                           " supported");
2333         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2334                 return rte_flow_error_set(error, EINVAL,
2335                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2336                                           "no outer UDP layer found");
2337         if (!mask)
2338                 mask = &rte_flow_item_gtp_mask;
2339         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2340                 return rte_flow_error_set(error, ENOTSUP,
2341                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2342                                           "Match is supported for GTP"
2343                                           " flags only");
2344         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2345                                          (const uint8_t *)&nic_mask,
2346                                          sizeof(struct rte_flow_item_gtp),
2347                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2348 }
2349
2350 /**
2351  * Validate GTP PSC item.
2352  *
2353  * @param[in] item
2354  *   Item specification.
2355  * @param[in] last_item
2356  *   Previously validated item in the pattern items.
2357  * @param[in] gtp_item
2358  *   Previous GTP item specification.
2359  * @param[in] attr
2360  *   Pointer to flow attributes.
2361  * @param[out] error
2362  *   Pointer to error structure.
2363  *
2364  * @return
2365  *   0 on success, a negative errno value otherwise and rte_errno is set.
2366  */
2367 static int
2368 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2369                               uint64_t last_item,
2370                               const struct rte_flow_item *gtp_item,
2371                               const struct rte_flow_attr *attr,
2372                               struct rte_flow_error *error)
2373 {
2374         const struct rte_flow_item_gtp *gtp_spec;
2375         const struct rte_flow_item_gtp *gtp_mask;
2376         const struct rte_flow_item_gtp_psc *mask;
2377         const struct rte_flow_item_gtp_psc nic_mask = {
2378                 .hdr.type = 0xF,
2379                 .hdr.qfi = 0x3F,
2380         };
2381
2382         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2383                 return rte_flow_error_set
2384                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2385                          "GTP PSC item must be preceded with GTP item");
2386         gtp_spec = gtp_item->spec;
2387         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2388         /* Reject if the GTP spec requests the E flag to match zero. */
2389         if (gtp_spec &&
2390                 (gtp_mask->v_pt_rsv_flags &
2391                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2392                 return rte_flow_error_set
2393                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2394                          "GTP E flag must be 1 to match GTP PSC");
2395         /* Check the flow is not created in group zero. */
2396         if (!attr->transfer && !attr->group)
2397                 return rte_flow_error_set
2398                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2399                          "GTP PSC is not supported for group 0");
2400         /* Without a GTP PSC spec there is nothing more to validate. */
2401         if (!item->spec)
2402                 return 0;
2403         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2404         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2405                                          (const uint8_t *)&nic_mask,
2406                                          sizeof(struct rte_flow_item_gtp_psc),
2407                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2408 }
2409
2410 /**
2411  * Validate IPV4 item.
2412  * Use the existing validation function mlx5_flow_validate_item_ipv4() and
2413  * add specific validation of the fragment_offset field.
2414  *
2415  * @param[in] item
2416  *   Item specification.
2417  * @param[in] item_flags
2418  *   Bit-fields that hold the items detected until now.
2419  * @param[out] error
2420  *   Pointer to error structure.
2421  *
2422  * @return
2423  *   0 on success, a negative errno value otherwise and rte_errno is set.
2424  */
2425 static int
2426 flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
2427                            const struct rte_flow_item *item,
2428                            uint64_t item_flags, uint64_t last_item,
2429                            uint16_t ether_type, struct rte_flow_error *error)
2430 {
2431         int ret;
2432         struct mlx5_priv *priv = dev->data->dev_private;
2433         struct mlx5_hca_attr *attr = &priv->sh->cdev->config.hca_attr;
2434         const struct rte_flow_item_ipv4 *spec = item->spec;
2435         const struct rte_flow_item_ipv4 *last = item->last;
2436         const struct rte_flow_item_ipv4 *mask = item->mask;
2437         rte_be16_t fragment_offset_spec = 0;
2438         rte_be16_t fragment_offset_last = 0;
2439         struct rte_flow_item_ipv4 nic_ipv4_mask = {
2440                 .hdr = {
2441                         .src_addr = RTE_BE32(0xffffffff),
2442                         .dst_addr = RTE_BE32(0xffffffff),
2443                         .type_of_service = 0xff,
2444                         .fragment_offset = RTE_BE16(0xffff),
2445                         .next_proto_id = 0xff,
2446                         .time_to_live = 0xff,
2447                 },
2448         };
2449
2450         if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
2451                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2452                 bool ihl_cap = !tunnel ?
2453                                attr->outer_ipv4_ihl : attr->inner_ipv4_ihl;
2454                 if (!ihl_cap)
2455                         return rte_flow_error_set(error, ENOTSUP,
2456                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2457                                                   item,
2458                                                   "IPV4 ihl offload not supported");
2459                 nic_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
2460         }
2461         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2462                                            ether_type, &nic_ipv4_mask,
2463                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2464         if (ret < 0)
2465                 return ret;
2466         if (spec && mask)
2467                 fragment_offset_spec = spec->hdr.fragment_offset &
2468                                        mask->hdr.fragment_offset;
2469         if (!fragment_offset_spec)
2470                 return 0;
2471         /*
2472          * spec and mask are valid, enforce using full mask to make sure the
2473          * complete value is used correctly.
2474          */
2475         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2476                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2477                 return rte_flow_error_set(error, EINVAL,
2478                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2479                                           item, "must use full mask for"
2480                                           " fragment_offset");
2481         /*
2482          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2483          * indicating this is the 1st fragment of a fragmented packet.
2484          * This is not yet supported in MLX5, return appropriate error message.
2485          */
2486         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2487                 return rte_flow_error_set(error, ENOTSUP,
2488                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2489                                           "match on first fragment not "
2490                                           "supported");
2491         if (fragment_offset_spec && !last)
2492                 return rte_flow_error_set(error, ENOTSUP,
2493                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2494                                           "specified value not supported");
2495         /* spec and last are valid, validate the specified range. */
2496         fragment_offset_last = last->hdr.fragment_offset &
2497                                mask->hdr.fragment_offset;
2498         /*
2499          * Match on fragment_offset spec 0x2001 and last 0x3fff
2500          * means MF is 1 and frag-offset is > 0.
2501          * This matches the second fragment and onward, excluding the last.
2502          * This is not yet supported in MLX5; return an appropriate
2503          * error.
2504          */
2505         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2506             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2507                 return rte_flow_error_set(error, ENOTSUP,
2508                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2509                                           last, "match on following "
2510                                           "fragments not supported");
2511         /*
2512          * Match on fragment_offset spec 0x0001 and last 0x1fff
2513          * means MF is 0 and frag-offset is > 0.
2514          * This matches the last fragment of a fragmented packet.
2515          * This is not yet supported in MLX5; return an appropriate
2516          * error.
2517          */
2518         if (fragment_offset_spec == RTE_BE16(1) &&
2519             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2520                 return rte_flow_error_set(error, ENOTSUP,
2521                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2522                                           last, "match on last "
2523                                           "fragment not supported");
2524         /*
2525          * Match on fragment_offset spec 0x0001 and last 0x3fff
2526          * means MF and/or frag-offset is not 0.
2527          * This is a fragmented packet.
2528          * Other range values are invalid and rejected.
2529          */
2530         if (!(fragment_offset_spec == RTE_BE16(1) &&
2531               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2532                 return rte_flow_error_set(error, ENOTSUP,
2533                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2534                                           "specified range not supported");
2535         return 0;
2536 }
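
     /*
      * Illustration (not part of the driver): per the checks above, the only
      * accepted non-zero fragment_offset match is the "any fragment" range,
      * i.e. MF and/or frag-offset not 0. A hypothetical application pattern
      * that passes this validation, assuming standard rte_flow usage:
      *
      *	struct rte_flow_item_ipv4 spec = {
      *		.hdr.fragment_offset = RTE_BE16(0x0001),
      *	};
      *	struct rte_flow_item_ipv4 last = {
      *		.hdr.fragment_offset = RTE_BE16(0x3fff),
      *	};
      *	struct rte_flow_item_ipv4 mask = {
      *		.hdr.fragment_offset = RTE_BE16(0x3fff),
      *	};
      *	struct rte_flow_item item = {
      *		.type = RTE_FLOW_ITEM_TYPE_IPV4,
      *		.spec = &spec, .last = &last, .mask = &mask,
      *	};
      */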
2537
2538 /**
2539  * Validate IPV6 fragment extension item.
2540  *
2541  * @param[in] item
2542  *   Item specification.
2543  * @param[in] item_flags
2544  *   Bit-fields that hold the items detected until now.
2545  * @param[out] error
2546  *   Pointer to error structure.
2547  *
2548  * @return
2549  *   0 on success, a negative errno value otherwise and rte_errno is set.
2550  */
2551 static int
2552 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2553                                     uint64_t item_flags,
2554                                     struct rte_flow_error *error)
2555 {
2556         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2557         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2558         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2559         rte_be16_t frag_data_spec = 0;
2560         rte_be16_t frag_data_last = 0;
2561         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2562         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2563                                       MLX5_FLOW_LAYER_OUTER_L4;
2564         int ret = 0;
2565         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2566                 .hdr = {
2567                         .next_header = 0xff,
2568                         .frag_data = RTE_BE16(0xffff),
2569                 },
2570         };
2571
2572         if (item_flags & l4m)
2573                 return rte_flow_error_set(error, EINVAL,
2574                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2575                                           "ipv6 fragment extension item cannot "
2576                                           "follow L4 item.");
2577         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2578             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2579                 return rte_flow_error_set(error, EINVAL,
2580                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2581                                           "ipv6 fragment extension item must "
2582                                           "follow ipv6 item");
2583         if (spec && mask)
2584                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2585         if (!frag_data_spec)
2586                 return 0;
2587         /*
2588          * Both spec and mask are valid; enforce a full mask so that the
2589          * complete frag_data value is matched.
2590          */
2591         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2592                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2593                 return rte_flow_error_set(error, EINVAL,
2594                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2595                                           item, "must use full mask for"
2596                                           " frag_data");
2597         /*
2598          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2599          * This is the first fragment of a fragmented packet.
2600          */
2601         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2602                 return rte_flow_error_set(error, ENOTSUP,
2603                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2604                                           "match on first fragment not "
2605                                           "supported");
2606         if (frag_data_spec && !last)
2607                 return rte_flow_error_set(error, EINVAL,
2608                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2609                                           "specified value not supported");
2610         ret = mlx5_flow_item_acceptable
2611                                 (item, (const uint8_t *)mask,
2612                                  (const uint8_t *)&nic_mask,
2613                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2614                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2615         if (ret)
2616                 return ret;
2617         /* spec and last are valid, validate the specified range. */
2618         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2619         /*
2620          * Match on frag_data spec 0x0009 and last 0xfff9
2621          * means M is 1 and frag-offset is > 0.
2622          * This matches the second fragment and onward, excluding the last.
2623          * This is not yet supported in MLX5; return an appropriate
2624          * error.
2625          */
2626         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2627                                        RTE_IPV6_EHDR_MF_MASK) &&
2628             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2629                 return rte_flow_error_set(error, ENOTSUP,
2630                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2631                                           last, "match on following "
2632                                           "fragments not supported");
2633         /*
2634          * Match on frag_data spec 0x0008 and last 0xfff8
2635          * means M is 0 and frag-offset is > 0.
2636          * This matches the last fragment of a fragmented packet.
2637          * This is not yet supported in MLX5; return an appropriate
2638          * error.
2639          */
2640         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2641             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2642                 return rte_flow_error_set(error, ENOTSUP,
2643                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2644                                           last, "match on last "
2645                                           "fragment not supported");
2646         /* Other range values are invalid and rejected. */
2647         return rte_flow_error_set(error, EINVAL,
2648                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2649                                   "specified range not supported");
2650 }
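
     /*
      * Illustration (not part of the driver): the frag_data layout assumed by
      * the constants above, per RFC 8200:
      *
      *	bits 15..3 - fragment offset (RTE_IPV6_EHDR_FO_MASK, 0xfff8)
      *	bits  2..1 - reserved
      *	bit      0 - M, more fragments (RTE_IPV6_EHDR_MF_MASK, 0x0001)
      *
      * Hence spec 0x0009 with last 0xfff9 covers "M set, offset >= 1" and
      * spec 0x0008 with last 0xfff8 covers "M clear, offset >= 1"; both
      * ranges are explicitly rejected above.
      */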
2651
2652 /**
2653  * Validate ASO CT item.
2654  *
2655  * @param[in] dev
2656  *   Pointer to the rte_eth_dev structure.
2657  * @param[in] item
2658  *   Item specification.
2659  * @param[in] item_flags
2660  *   Pointer to bit-fields that hold the items detected until now.
2661  * @param[out] error
2662  *   Pointer to error structure.
2663  *
2664  * @return
2665  *   0 on success, a negative errno value otherwise and rte_errno is set.
2666  */
2667 static int
2668 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2669                              const struct rte_flow_item *item,
2670                              uint64_t *item_flags,
2671                              struct rte_flow_error *error)
2672 {
2673         const struct rte_flow_item_conntrack *spec = item->spec;
2674         const struct rte_flow_item_conntrack *mask = item->mask;
2675         uint32_t flags;
2676         RTE_SET_USED(dev);
2677
2678         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2679                 return rte_flow_error_set(error, EINVAL,
2680                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2681                                           "Only one CT is supported");
2682         if (!mask)
2683                 mask = &rte_flow_item_conntrack_mask;
             /* The spec is required; it would be dereferenced below. */
             if (!spec)
                     return rte_flow_error_set(error, EINVAL,
                                               RTE_FLOW_ERROR_TYPE_ITEM, item,
                                               "CT item spec cannot be NULL");
2684         flags = spec->flags & mask->flags;
2685         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2686             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2687              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2688              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2689                 return rte_flow_error_set(error, EINVAL,
2690                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2691                                           "Conflict status bits");
2692         /* State change also needs to be considered. */
2693         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2694         return 0;
2695 }
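
     /*
      * Illustration (not part of the driver): a CT item accepted by the
      * conflict check above, matching packets of a valid tracked connection:
      *
      *	struct rte_flow_item_conntrack spec = {
      *		.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
      *	};
      *
      * Combining VALID with INVALID, BAD or DISABLED in one spec is rejected.
      */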
2696
2697 /**
2698  * Validate the pop VLAN action.
2699  *
2700  * @param[in] dev
2701  *   Pointer to the rte_eth_dev structure.
2702  * @param[in] action_flags
2703  *   Holds the actions detected until now.
2704  * @param[in] action
2705  *   Pointer to the pop vlan action.
2706  * @param[in] item_flags
2707  *   The items found in this flow rule.
2708  * @param[in] attr
2709  *   Pointer to flow attributes.
2710  * @param[out] error
2711  *   Pointer to error structure.
2712  *
2713  * @return
2714  *   0 on success, a negative errno value otherwise and rte_errno is set.
2715  */
2716 static int
2717 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2718                                  uint64_t action_flags,
2719                                  const struct rte_flow_action *action,
2720                                  uint64_t item_flags,
2721                                  const struct rte_flow_attr *attr,
2722                                  struct rte_flow_error *error)
2723 {
2724         const struct mlx5_priv *priv = dev->data->dev_private;
2725         struct mlx5_dev_ctx_shared *sh = priv->sh;
2726         bool direction_error = false;
2727
2728         if (!sh->pop_vlan_action)
2729                 return rte_flow_error_set(error, ENOTSUP,
2730                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2731                                           NULL,
2732                                           "pop vlan action is not supported");
2733         /* Pop VLAN is not supported in egress except for CX6 FDB mode. */
2734         if (attr->transfer) {
2735                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2736                 bool is_cx5 = sh->steering_format_version ==
2737                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2738
2739                 if (fdb_tx && is_cx5)
2740                         direction_error = true;
2741         } else if (attr->egress) {
2742                 direction_error = true;
2743         }
2744         if (direction_error)
2745                 return rte_flow_error_set(error, ENOTSUP,
2746                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2747                                           NULL,
2748                                           "pop vlan action not supported for egress");
2749         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2750                 return rte_flow_error_set(error, ENOTSUP,
2751                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2752                                           "no support for multiple VLAN "
2753                                           "actions");
2754         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2755         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2756             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2757                 return rte_flow_error_set(error, ENOTSUP,
2758                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2759                                           NULL,
2760                                           "cannot pop vlan after decap without "
2761                                           "match on inner vlan in the flow");
2762         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2763         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2764             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2765                 return rte_flow_error_set(error, ENOTSUP,
2766                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2767                                           NULL,
2768                                           "cannot pop vlan without a "
2769                                           "match on (outer) vlan in the flow");
2770         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2771                 return rte_flow_error_set(error, EINVAL,
2772                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2773                                           "wrong action order, port_id should "
2774                                           "be after pop VLAN action");
2775         if (!attr->transfer && priv->representor)
2776                 return rte_flow_error_set(error, ENOTSUP,
2777                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2778                                           "pop vlan action for VF representor "
2779                                           "not supported on NIC table");
2780         return 0;
2781 }
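
     /*
      * Illustration (not part of the driver): the ordering enforced above for
      * a hypothetical transfer rule, in testpmd-like notation:
      *
      *	pattern eth / vlan / end
      *	actions of_pop_vlan / port_id id 1 / end
      *
      * The (outer) VLAN match must be present and port_id must follow the pop.
      */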
2782
2783 /**
2784  * Get VLAN default info from vlan match info.
2785  *
2786  * @param[in] items
2787  *   The list of item specifications.
2788  * @param[out] vlan
2789  *   Pointer to the VLAN info to fill.
2793  */
2794 static void
2795 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2796                                   struct rte_vlan_hdr *vlan)
2797 {
2798         const struct rte_flow_item_vlan nic_mask = {
2799                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2800                                 MLX5DV_FLOW_VLAN_VID_MASK),
2801                 .inner_type = RTE_BE16(0xffff),
2802         };
2803
2804         if (items == NULL)
2805                 return;
2806         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2807                 int type = items->type;
2808
2809                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2810                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2811                         break;
2812         }
2813         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2814                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2815                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2816
2817                 /* If VLAN item in pattern doesn't contain data, return here. */
2818                 if (!vlan_v)
2819                         return;
2820                 if (!vlan_m)
2821                         vlan_m = &nic_mask;
2822                 /* Only full match values are accepted */
2823                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2824                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2825                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2826                         vlan->vlan_tci |=
2827                                 rte_be_to_cpu_16(vlan_v->tci &
2828                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2829                 }
2830                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2831                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2832                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2833                         vlan->vlan_tci |=
2834                                 rte_be_to_cpu_16(vlan_v->tci &
2835                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2836                 }
2837                 if (vlan_m->inner_type == nic_mask.inner_type)
2838                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2839                                                            vlan_m->inner_type);
2840         }
2841 }
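
     /*
      * Illustration (not part of the driver): with a fully masked VLAN item
      * whose TCI is 0x6005, the extraction above yields PCP 3 (bits 15..13)
      * and VID 5 (bits 11..0), so vlan->vlan_tci becomes 0x6005.
      */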
2842
2843 /**
2844  * Validate the push VLAN action.
2845  *
2846  * @param[in] dev
2847  *   Pointer to the rte_eth_dev structure.
2848  * @param[in] action_flags
2849  *   Holds the actions detected until now.
2850  * @param[in] vlan_m
2851  *   Pointer to the VLAN item mask in the pattern, or NULL if none.
2852  * @param[in] action
2853  *   Pointer to the action structure.
2854  * @param[in] attr
2855  *   Pointer to flow attributes
2856  * @param[out] error
2857  *   Pointer to error structure.
2858  *
2859  * @return
2860  *   0 on success, a negative errno value otherwise and rte_errno is set.
2861  */
2862 static int
2863 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2864                                   uint64_t action_flags,
2865                                   const struct rte_flow_item_vlan *vlan_m,
2866                                   const struct rte_flow_action *action,
2867                                   const struct rte_flow_attr *attr,
2868                                   struct rte_flow_error *error)
2869 {
2870         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2871         const struct mlx5_priv *priv = dev->data->dev_private;
2872         struct mlx5_dev_ctx_shared *sh = priv->sh;
2873         bool direction_error = false;
2874
2875         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2876             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2877                 return rte_flow_error_set(error, EINVAL,
2878                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2879                                           "invalid vlan ethertype");
2880         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2881                 return rte_flow_error_set(error, EINVAL,
2882                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2883                                           "wrong action order, port_id should "
2884                                           "be after push VLAN");
2885         /* Push VLAN is not supported in ingress except for CX6 FDB mode. */
2886         if (attr->transfer) {
2887                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2888                 bool is_cx5 = sh->steering_format_version ==
2889                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2890
2891                 if (!fdb_tx && is_cx5)
2892                         direction_error = true;
2893         } else if (attr->ingress) {
2894                 direction_error = true;
2895         }
2896         if (direction_error)
2897                 return rte_flow_error_set(error, ENOTSUP,
2898                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2899                                           NULL,
2900                                           "push vlan action not supported for ingress");
2901         if (!attr->transfer && priv->representor)
2902                 return rte_flow_error_set(error, ENOTSUP,
2903                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2904                                           "push vlan action for VF representor "
2905                                           "not supported on NIC table");
2906         if (vlan_m &&
2907             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2908             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2909                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2910             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2911             !(mlx5_flow_find_action
2912                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2913                 return rte_flow_error_set(error, EINVAL,
2914                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2915                                           "not full match mask on VLAN PCP and "
2916                                           "there is no of_set_vlan_pcp action, "
2917                                           "push VLAN action cannot figure out "
2918                                           "PCP value");
2919         if (vlan_m &&
2920             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2921             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2922                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2923             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2924             !(mlx5_flow_find_action
2925                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2926                 return rte_flow_error_set(error, EINVAL,
2927                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2928                                           "not full match mask on VLAN VID and "
2929                                           "there is no of_set_vlan_vid action, "
2930                                           "push VLAN action cannot figure out "
2931                                           "VID value");
2933         return 0;
2934 }
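
     /*
      * Illustration (not part of the driver): when the pattern does not fully
      * mask VLAN PCP/VID, the values for the pushed header must come from
      * explicit actions, e.g. in testpmd-like notation:
      *
      *	actions of_push_vlan ethertype 0x8100 /
      *		of_set_vlan_pcp vlan_pcp 3 /
      *		of_set_vlan_vid vlan_vid 5 / ... / end
      */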
2935
2936 /**
2937  * Validate the set VLAN PCP.
2938  *
2939  * @param[in] action_flags
2940  *   Holds the actions detected until now.
2941  * @param[in] actions
2942  *   Pointer to the list of actions remaining in the flow rule.
2943  * @param[out] error
2944  *   Pointer to error structure.
2945  *
2946  * @return
2947  *   0 on success, a negative errno value otherwise and rte_errno is set.
2948  */
2949 static int
2950 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2951                                      const struct rte_flow_action actions[],
2952                                      struct rte_flow_error *error)
2953 {
2954         const struct rte_flow_action *action = actions;
2955         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2956
2957         if (conf->vlan_pcp > 7)
2958                 return rte_flow_error_set(error, EINVAL,
2959                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2960                                           "VLAN PCP value is too big");
2961         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2962                 return rte_flow_error_set(error, ENOTSUP,
2963                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2964                                           "set VLAN PCP action must follow "
2965                                           "the push VLAN action");
2966         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2967                 return rte_flow_error_set(error, ENOTSUP,
2968                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2969                                           "Multiple VLAN PCP modification are "
2970                                           "not supported");
2971         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2972                 return rte_flow_error_set(error, EINVAL,
2973                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2974                                           "wrong action order, port_id should "
2975                                           "be after set VLAN PCP");
2976         return 0;
2977 }
2978
2979 /**
2980  * Validate the set VLAN VID.
2981  *
2982  * @param[in] item_flags
2983  *   Holds the items detected in this rule.
2984  * @param[in] action_flags
2985  *   Holds the actions detected until now.
2986  * @param[in] actions
2987  *   Pointer to the list of actions remaining in the flow rule.
2988  * @param[out] error
2989  *   Pointer to error structure.
2990  *
2991  * @return
2992  *   0 on success, a negative errno value otherwise and rte_errno is set.
2993  */
2994 static int
2995 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2996                                      uint64_t action_flags,
2997                                      const struct rte_flow_action actions[],
2998                                      struct rte_flow_error *error)
2999 {
3000         const struct rte_flow_action *action = actions;
3001         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
3002
3003         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
3004                 return rte_flow_error_set(error, EINVAL,
3005                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3006                                           "VLAN VID value is too big");
3007         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3008             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3009                 return rte_flow_error_set(error, ENOTSUP,
3010                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3011                                           "set VLAN VID action must follow push"
3012                                           " VLAN action or match on VLAN item");
3013         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3014                 return rte_flow_error_set(error, ENOTSUP,
3015                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3016                                           "Multiple VLAN VID modifications are "
3017                                           "not supported");
3018         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3019                 return rte_flow_error_set(error, EINVAL,
3020                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3021                                           "wrong action order, port_id should "
3022                                           "be after set VLAN VID");
3023         return 0;
3024 }
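
     /*
      * Note (illustrative): the 0xFFE bound above follows 802.1Q - VID 0xFFF
      * is reserved, so 4094 (0xFFE) is the largest VID that may be set;
      * vlan_vid is big-endian, hence the rte_be_to_cpu_16() conversion first.
      */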
3025
3026 /**
3027  * Validate the FLAG action.
3028  *
3029  * @param[in] dev
3030  *   Pointer to the rte_eth_dev structure.
3031  * @param[in] action_flags
3032  *   Holds the actions detected until now.
3033  * @param[in] attr
3034  *   Pointer to flow attributes
3035  * @param[out] error
3036  *   Pointer to error structure.
3037  *
3038  * @return
3039  *   0 on success, a negative errno value otherwise and rte_errno is set.
3040  */
3041 static int
3042 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3043                              uint64_t action_flags,
3044                              const struct rte_flow_attr *attr,
3045                              struct rte_flow_error *error)
3046 {
3047         struct mlx5_priv *priv = dev->data->dev_private;
3048         struct mlx5_sh_config *config = &priv->sh->config;
3049         int ret;
3050
3051         /* Fall back if no extended metadata register support. */
3052         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3053                 return mlx5_flow_validate_action_flag(action_flags, attr,
3054                                                       error);
3055         /* Extensive metadata mode requires registers. */
3056         if (!mlx5_flow_ext_mreg_supported(dev))
3057                 return rte_flow_error_set(error, ENOTSUP,
3058                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3059                                           "no metadata registers "
3060                                           "to support flag action");
3061         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3062                 return rte_flow_error_set(error, ENOTSUP,
3063                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3064                                           "extended metadata register"
3065                                           " isn't available");
3066         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3067         if (ret < 0)
3068                 return ret;
3069         MLX5_ASSERT(ret > 0);
3070         if (action_flags & MLX5_FLOW_ACTION_MARK)
3071                 return rte_flow_error_set(error, EINVAL,
3072                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3073                                           "can't mark and flag in same flow");
3074         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3075                 return rte_flow_error_set(error, EINVAL,
3076                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3077                                           "can't have 2 flag"
3078                                           " actions in same flow");
3079         return 0;
3080 }
3081
3082 /**
3083  * Validate MARK action.
3084  *
3085  * @param[in] dev
3086  *   Pointer to the rte_eth_dev structure.
3087  * @param[in] action
3088  *   Pointer to action.
3089  * @param[in] action_flags
3090  *   Holds the actions detected until now.
3091  * @param[in] attr
3092  *   Pointer to flow attributes
3093  * @param[out] error
3094  *   Pointer to error structure.
3095  *
3096  * @return
3097  *   0 on success, a negative errno value otherwise and rte_errno is set.
3098  */
3099 static int
3100 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3101                              const struct rte_flow_action *action,
3102                              uint64_t action_flags,
3103                              const struct rte_flow_attr *attr,
3104                              struct rte_flow_error *error)
3105 {
3106         struct mlx5_priv *priv = dev->data->dev_private;
3107         struct mlx5_sh_config *config = &priv->sh->config;
3108         const struct rte_flow_action_mark *mark = action->conf;
3109         int ret;
3110
3111         if (is_tunnel_offload_active(dev))
3112                 return rte_flow_error_set(error, ENOTSUP,
3113                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3114                                           "no mark action "
3115                                           "if tunnel offload active");
3116         /* Fall back if no extended metadata register support. */
3117         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3118                 return mlx5_flow_validate_action_mark(action, action_flags,
3119                                                       attr, error);
3120         /* Extensive metadata mode requires registers. */
3121         if (!mlx5_flow_ext_mreg_supported(dev))
3122                 return rte_flow_error_set(error, ENOTSUP,
3123                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3124                                           "no metadata registers "
3125                                           "to support mark action");
3126         if (!priv->sh->dv_mark_mask)
3127                 return rte_flow_error_set(error, ENOTSUP,
3128                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3129                                           "extended metadata register"
3130                                           " isn't available");
3131         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3132         if (ret < 0)
3133                 return ret;
3134         MLX5_ASSERT(ret > 0);
3135         if (!mark)
3136                 return rte_flow_error_set(error, EINVAL,
3137                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3138                                           "configuration cannot be null");
3139         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3140                 return rte_flow_error_set(error, EINVAL,
3141                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3142                                           &mark->id,
3143                                           "mark id exceeds the limit");
3144         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3145                 return rte_flow_error_set(error, EINVAL,
3146                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3147                                           "can't flag and mark in same flow");
3148         if (action_flags & MLX5_FLOW_ACTION_MARK)
3149                 return rte_flow_error_set(error, EINVAL,
3150                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3151                                           "can't have 2 mark actions in same"
3152                                           " flow");
3153         return 0;
3154 }
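
     /*
      * Note (illustrative): in extended metadata mode the usable mark range
      * is clipped by dv_mark_mask. With a hypothetical mask of 0xffff,
      * mark->id 0xfffe passes the range check above while 0x10000 is
      * rejected.
      */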
3155
3156 /**
3157  * Validate SET_META action.
3158  *
3159  * @param[in] dev
3160  *   Pointer to the rte_eth_dev structure.
3161  * @param[in] action
3162  *   Pointer to the action structure.
3163  * @param[in] action_flags
3164  *   Holds the actions detected until now.
3165  * @param[in] attr
3166  *   Pointer to flow attributes
3167  * @param[out] error
3168  *   Pointer to error structure.
3169  *
3170  * @return
3171  *   0 on success, a negative errno value otherwise and rte_errno is set.
3172  */
3173 static int
3174 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3175                                  const struct rte_flow_action *action,
3176                                  uint64_t action_flags __rte_unused,
3177                                  const struct rte_flow_attr *attr,
3178                                  struct rte_flow_error *error)
3179 {
3180         struct mlx5_priv *priv = dev->data->dev_private;
3181         struct mlx5_sh_config *config = &priv->sh->config;
3182         const struct rte_flow_action_set_meta *conf;
3183         uint32_t nic_mask = UINT32_MAX;
3184         int reg;
3185
3186         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
3187             !mlx5_flow_ext_mreg_supported(dev))
3188                 return rte_flow_error_set(error, ENOTSUP,
3189                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3190                                           "extended metadata register"
3191                                           " isn't supported");
3192         reg = flow_dv_get_metadata_reg(dev, attr, error);
3193         if (reg < 0)
3194                 return reg;
3195         if (reg == REG_NON)
3196                 return rte_flow_error_set(error, ENOTSUP,
3197                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3198                                           "unavailable extended metadata register");
3199         if (reg != REG_A && reg != REG_B)
3200                 nic_mask = priv->sh->dv_meta_mask;
3204         if (!(action->conf))
3205                 return rte_flow_error_set(error, EINVAL,
3206                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3207                                           "configuration cannot be null");
3208         conf = (const struct rte_flow_action_set_meta *)action->conf;
3209         if (!conf->mask)
3210                 return rte_flow_error_set(error, EINVAL,
3211                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3212                                           "zero mask doesn't have any effect");
3213         if (conf->mask & ~nic_mask)
3214                 return rte_flow_error_set(error, EINVAL,
3215                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3216                                           "meta data must be within reg C0");
3217         return 0;
3218 }
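
     /*
      * Illustration (not part of the driver): a SET_META action accepted by
      * the checks above when metadata is backed by a C register with a
      * hypothetical dv_meta_mask of 0xffff:
      *
      *	struct rte_flow_action_set_meta conf = {
      *		.data = 0x1234,
      *		.mask = 0xffff,
      *	};
      *
      * A zero mask, or one exceeding dv_meta_mask, is rejected with EINVAL.
      */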
3219
3220 /**
3221  * Validate SET_TAG action.
3222  *
3223  * @param[in] dev
3224  *   Pointer to the rte_eth_dev structure.
3225  * @param[in] action
3226  *   Pointer to the action structure.
3227  * @param[in] action_flags
3228  *   Holds the actions detected until now.
3229  * @param[in] attr
3230  *   Pointer to flow attributes
3231  * @param[out] error
3232  *   Pointer to error structure.
3233  *
3234  * @return
3235  *   0 on success, a negative errno value otherwise and rte_errno is set.
3236  */
3237 static int
3238 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3239                                 const struct rte_flow_action *action,
3240                                 uint64_t action_flags,
3241                                 const struct rte_flow_attr *attr,
3242                                 struct rte_flow_error *error)
3243 {
3244         const struct rte_flow_action_set_tag *conf;
3245         const uint64_t terminal_action_flags =
3246                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3247                 MLX5_FLOW_ACTION_RSS;
3248         int ret;
3249
3250         if (!mlx5_flow_ext_mreg_supported(dev))
3251                 return rte_flow_error_set(error, ENOTSUP,
3252                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3253                                           "extensive metadata register"
3254                                           " isn't supported");
3255         if (!(action->conf))
3256                 return rte_flow_error_set(error, EINVAL,
3257                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3258                                           "configuration cannot be null");
3259         conf = (const struct rte_flow_action_set_tag *)action->conf;
3260         if (!conf->mask)
3261                 return rte_flow_error_set(error, EINVAL,
3262                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3263                                           "zero mask doesn't have any effect");
3264         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3265         if (ret < 0)
3266                 return ret;
3267         if (!attr->transfer && attr->ingress &&
3268             (action_flags & terminal_action_flags))
3269                 return rte_flow_error_set(error, EINVAL,
3270                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3271                                           "set_tag has no effect"
3272                                           " with terminal actions");
3273         return 0;
3274 }
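
     /*
      * Illustration (not part of the driver): a SET_TAG action accepted by
      * the checks above; index selects the application TAG register and the
      * mask must be non-zero:
      *
      *	struct rte_flow_action_set_tag conf = {
      *		.data = 0xbeef,
      *		.index = 0,
      *		.mask = 0xffff,
      *	};
      */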
3275
3276 /**
3277  * Validate count action.
3278  *
3279  * @param[in] dev
3280  *   Pointer to rte_eth_dev structure.
3281  * @param[in] shared
3282  *   Indicator if action is shared.
3283  * @param[in] action_flags
3284  *   Holds the actions detected until now.
3285  * @param[out] error
3286  *   Pointer to error structure.
3287  *
3288  * @return
3289  *   0 on success, a negative errno value otherwise and rte_errno is set.
3290  */
3291 static int
3292 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3293                               uint64_t action_flags,
3294                               struct rte_flow_error *error)
3295 {
3296         struct mlx5_priv *priv = dev->data->dev_private;
3297
3298         if (!priv->sh->cdev->config.devx)
3299                 goto notsup_err;
3300         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3301                 return rte_flow_error_set(error, EINVAL,
3302                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3303                                           "duplicate count actions set");
3304         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3305             !priv->sh->flow_hit_aso_en)
3306                 return rte_flow_error_set(error, EINVAL,
3307                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3308                                           "old age and shared count combination is not supported");
3309 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3310         return 0;
3311 #endif
3312 notsup_err:
3313         return rte_flow_error_set
3314                       (error, ENOTSUP,
3315                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3316                        NULL,
3317                        "count action not supported");
3318 }
3319
3320 /**
3321  * Validate the L2 encap action.
3322  *
3323  * @param[in] dev
3324  *   Pointer to the rte_eth_dev structure.
3325  * @param[in] action_flags
3326  *   Holds the actions detected until now.
3327  * @param[in] action
3328  *   Pointer to the action structure.
3329  * @param[in] attr
3330  *   Pointer to flow attributes.
3331  * @param[out] error
3332  *   Pointer to error structure.
3333  *
3334  * @return
3335  *   0 on success, a negative errno value otherwise and rte_errno is set.
3336  */
3337 static int
3338 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3339                                  uint64_t action_flags,
3340                                  const struct rte_flow_action *action,
3341                                  const struct rte_flow_attr *attr,
3342                                  struct rte_flow_error *error)
3343 {
3344         const struct mlx5_priv *priv = dev->data->dev_private;
3345
3346         if (!(action->conf))
3347                 return rte_flow_error_set(error, EINVAL,
3348                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3349                                           "configuration cannot be null");
3350         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3351                 return rte_flow_error_set(error, EINVAL,
3352                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3353                                           "can only have a single encap action "
3354                                           "in a flow");
3355         if (!attr->transfer && priv->representor)
3356                 return rte_flow_error_set(error, ENOTSUP,
3357                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3358                                           "encap action for VF representor "
3359                                           "not supported on NIC table");
3360         return 0;
3361 }
3362
3363 /**
3364  * Validate a decap action.
3365  *
3366  * @param[in] dev
3367  *   Pointer to the rte_eth_dev structure.
3368  * @param[in] action_flags
3369  *   Holds the actions detected until now.
3370  * @param[in] action
3371  *   Pointer to the action structure.
3372  * @param[in] item_flags
3373  *   Holds the items detected.
3374  * @param[in] attr
3375  *   Pointer to flow attributes
3376  * @param[out] error
3377  *   Pointer to error structure.
3378  *
3379  * @return
3380  *   0 on success, a negative errno value otherwise and rte_errno is set.
3381  */
3382 static int
3383 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3384                               uint64_t action_flags,
3385                               const struct rte_flow_action *action,
3386                               const uint64_t item_flags,
3387                               const struct rte_flow_attr *attr,
3388                               struct rte_flow_error *error)
3389 {
3390         const struct mlx5_priv *priv = dev->data->dev_private;
3391
3392         if (priv->sh->cdev->config.hca_attr.scatter_fcs_w_decap_disable &&
3393             !priv->sh->config.decap_en)
3394                 return rte_flow_error_set(error, ENOTSUP,
3395                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3396                                           "decap is not enabled");
3397         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3398                 return rte_flow_error_set(error, ENOTSUP,
3399                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3400                                           action_flags &
3401                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3402                                           "have a single decap action" : "decap "
3403                                           "after encap is not supported");
3404         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3405                 return rte_flow_error_set(error, EINVAL,
3406                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3407                                           "can't have decap action after"
3408                                           " modify action");
3409         if (attr->egress)
3410                 return rte_flow_error_set(error, ENOTSUP,
3411                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3412                                           NULL,
3413                                           "decap action not supported for "
3414                                           "egress");
3415         if (!attr->transfer && priv->representor)
3416                 return rte_flow_error_set(error, ENOTSUP,
3417                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3418                                           "decap action for VF representor "
3419                                           "not supported on NIC table");
3420         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3421             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3422                 return rte_flow_error_set(error, ENOTSUP,
3423                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3424                                 "VXLAN item should be present for VXLAN decap");
3425         return 0;
3426 }
3427
3428 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3429
3430 /**
3431  * Validate the raw encap and decap actions.
3432  *
3433  * @param[in] dev
3434  *   Pointer to the rte_eth_dev structure.
3435  * @param[in] decap
3436  *   Pointer to the decap action.
3437  * @param[in] encap
3438  *   Pointer to the encap action.
3439  * @param[in] attr
3440  *   Pointer to flow attributes
3441  * @param[in, out] action_flags
3442  *   Holds the actions detected until now.
3443  * @param[out] actions_n
3444  *   Pointer to the number of actions counter.
3445  * @param[in] action
3446  *   Pointer to the action structure.
3447  * @param[in] item_flags
3448  *   Holds the items detected.
3449  * @param[out] error
3450  *   Pointer to error structure.
3451  *
3452  * @return
3453  *   0 on success, a negative errno value otherwise and rte_errno is set.
3454  */
3455 static int
3456 flow_dv_validate_action_raw_encap_decap
3457         (struct rte_eth_dev *dev,
3458          const struct rte_flow_action_raw_decap *decap,
3459          const struct rte_flow_action_raw_encap *encap,
3460          const struct rte_flow_attr *attr, uint64_t *action_flags,
3461          int *actions_n, const struct rte_flow_action *action,
3462          uint64_t item_flags, struct rte_flow_error *error)
3463 {
3464         const struct mlx5_priv *priv = dev->data->dev_private;
3465         int ret;
3466
3467         if (encap && (!encap->size || !encap->data))
3468                 return rte_flow_error_set(error, EINVAL,
3469                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3470                                           "raw encap data cannot be empty");
3471         if (decap && encap) {
3472                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3473                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3474                         /* L3 encap. */
3475                         decap = NULL;
3476                 else if (encap->size <=
3477                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3478                            decap->size >
3479                            MLX5_ENCAPSULATION_DECISION_SIZE)
3480                         /* L3 decap. */
3481                         encap = NULL;
3482                 else if (encap->size >
3483                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3484                            decap->size >
3485                            MLX5_ENCAPSULATION_DECISION_SIZE)
3486                         /* 2 L2 actions: encap and decap. */
3487                         ;
3488                 else
3489                         return rte_flow_error_set(error,
3490                                 ENOTSUP,
3491                                 RTE_FLOW_ERROR_TYPE_ACTION,
3492                                 NULL, "unsupported combination: "
3493                                 "both raw decap and raw encap "
3494                                 "sizes are too small");
3495         }
3496         if (decap) {
3497                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3498                                                     item_flags, attr, error);
3499                 if (ret < 0)
3500                         return ret;
3501                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3502                 ++(*actions_n);
3503         }
3504         if (encap) {
3505                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3506                         return rte_flow_error_set(error, ENOTSUP,
3507                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3508                                                   NULL,
3509                                                   "small raw encap size");
3510                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3511                         return rte_flow_error_set(error, EINVAL,
3512                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3513                                                   NULL,
3514                                                   "more than one encap action");
3515                 if (!attr->transfer && priv->representor)
3516                         return rte_flow_error_set
3517                                         (error, ENOTSUP,
3518                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3519                                          "encap action for VF representor "
3520                                          "not supported on NIC table");
3521                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3522                 ++(*actions_n);
3523         }
3524         return 0;
3525 }
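
     /*
      * Note (illustrative): how the size checks above classify a raw decap +
      * raw encap pair. MLX5_ENCAPSULATION_DECISION_SIZE is the L2/L3
      * boundary: a small decap (stripping L2) followed by a large encap is
      * treated as a single L3 encap reformat (decap is dropped), the mirrored
      * case as a single L3 decap, and two large buffers as separate L2 decap
      * and encap actions.
      */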
3526
3527 /**
3528  * Validate the ASO CT action.
3529  *
3530  * @param[in] dev
3531  *   Pointer to the rte_eth_dev structure.
3532  * @param[in] action_flags
3533  *   Holds the actions detected until now.
3534  * @param[in] item_flags
3535  *   The items found in this flow rule.
3536  * @param[in] attr
3537  *   Pointer to flow attributes.
3538  * @param[out] error
3539  *   Pointer to error structure.
3540  *
3541  * @return
3542  *   0 on success, a negative errno value otherwise and rte_errno is set.
3543  */
3544 static int
3545 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3546                                uint64_t action_flags,
3547                                uint64_t item_flags,
3548                                const struct rte_flow_attr *attr,
3549                                struct rte_flow_error *error)
3550 {
3551         RTE_SET_USED(dev);
3552
3553         if (attr->group == 0 && !attr->transfer)
3554                 return rte_flow_error_set(error, ENOTSUP,
3555                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3556                                           NULL,
3557                                           "Only support non-root table");
3558         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3559                 return rte_flow_error_set(error, ENOTSUP,
3560                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3561                                           "CT cannot follow a fate action");
3562         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3563             (action_flags & MLX5_FLOW_ACTION_AGE))
3564                 return rte_flow_error_set(error, EINVAL,
3565                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3566                                           "Only one ASO action is supported");
3567         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3568                 return rte_flow_error_set(error, EINVAL,
3569                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3570                                           "Encap cannot exist before CT");
3571         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3572                 return rte_flow_error_set(error, EINVAL,
3573                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3574                                           "Not an outer TCP packet");
3575         return 0;
3576 }
3577
3578 int
3579 flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
3580                              struct mlx5_list_entry *entry, void *cb_ctx)
3581 {
3582         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3583         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3584         struct mlx5_flow_dv_encap_decap_resource *resource;
3585
3586         resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
3587                                 entry);
3588         if (resource->reformat_type == ctx_resource->reformat_type &&
3589             resource->ft_type == ctx_resource->ft_type &&
3590             resource->flags == ctx_resource->flags &&
3591             resource->size == ctx_resource->size &&
3592             !memcmp((const void *)resource->buf,
3593                     (const void *)ctx_resource->buf,
3594                     resource->size))
3595                 return 0;
3596         return -1;
3597 }
3598
3599 struct mlx5_list_entry *
3600 flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
3601 {
3602         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3603         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3604         struct mlx5dv_dr_domain *domain;
3605         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3606         struct mlx5_flow_dv_encap_decap_resource *resource;
3607         uint32_t idx;
3608         int ret;
3609
3610         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3611                 domain = sh->fdb_domain;
3612         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3613                 domain = sh->rx_domain;
3614         else
3615                 domain = sh->tx_domain;
3616         /* Register new encap/decap resource. */
3617         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
3618         if (!resource) {
3619                 rte_flow_error_set(ctx->error, ENOMEM,
3620                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3621                                    "cannot allocate resource memory");
3622                 return NULL;
3623         }
3624         *resource = *ctx_resource;
3625         resource->idx = idx;
3626         ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->cdev->ctx,
3627                                                               domain, resource,
3628                                                              &resource->action);
3629         if (ret) {
3630                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3631                 rte_flow_error_set(ctx->error, ENOMEM,
3632                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3633                                    NULL, "cannot create action");
3634                 return NULL;
3635         }
3636
3637         return &resource->entry;
3638 }
3639
3640 struct mlx5_list_entry *
3641 flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
3642                              void *cb_ctx)
3643 {
3644         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3645         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3646         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3647         uint32_t idx;
3648
3649         cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3650                                            &idx);
3651         if (!cache_resource) {
3652                 rte_flow_error_set(ctx->error, ENOMEM,
3653                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3654                                    "cannot allocate resource memory");
3655                 return NULL;
3656         }
3657         memcpy(cache_resource, oentry, sizeof(*cache_resource));
3658         cache_resource->idx = idx;
3659         return &cache_resource->entry;
3660 }
3661
3662 void
3663 flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3664 {
3665         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3666         struct mlx5_flow_dv_encap_decap_resource *res =
3667                                        container_of(entry, typeof(*res), entry);
3668
3669         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
3670 }
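
/*
 * Editorial note (not part of the upstream logic): the four callbacks above
 * implement the mlx5 hash-list contract consumed by
 * flow_dv_encap_decap_resource_register() below. On a cache miss the list
 * invokes create_cb(); on a candidate hit it calls match_cb(), which follows
 * memcmp()-style semantics (0 means "same resource"); clone_cb() and
 * clone_free_cb() are presumably used to maintain per-lcore copies of the
 * bookkeeping entry, while the DR action object itself stays shared and
 * reference-counted.
 */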
3671
3672 /**
3673  * Find existing encap/decap resource or create and register a new one.
3674  *
3675  * @param[in, out] dev
3676  *   Pointer to rte_eth_dev structure.
3677  * @param[in, out] resource
3678  *   Pointer to encap/decap resource.
3679  * @param[in, out] dev_flow
3680  *   Pointer to the dev_flow.
3681  * @param[out] error
3682  *   Pointer to the error structure.
3683  *
3684  * @return
3685  *   0 on success, a negative errno value otherwise and rte_errno is set.
3686  */
3687 static int
3688 flow_dv_encap_decap_resource_register
3689                         (struct rte_eth_dev *dev,
3690                          struct mlx5_flow_dv_encap_decap_resource *resource,
3691                          struct mlx5_flow *dev_flow,
3692                          struct rte_flow_error *error)
3693 {
3694         struct mlx5_priv *priv = dev->data->dev_private;
3695         struct mlx5_dev_ctx_shared *sh = priv->sh;
3696         struct mlx5_list_entry *entry;
3697         union {
3698                 struct {
3699                         uint32_t ft_type:8;
3700                         uint32_t refmt_type:8;
3701                         /*
3702                          * Header reformat actions can be shared between
3703                          * non-root tables. One bit to indicate non-root
3704                          * table or not.
3705                          */
3706                         uint32_t is_root:1;
3707                         uint32_t reserve:15;
3708                 };
3709                 uint32_t v32;
3710         } encap_decap_key = {
3711                 {
3712                         .ft_type = resource->ft_type,
3713                         .refmt_type = resource->reformat_type,
3714                         .is_root = !!dev_flow->dv.group,
3715                         .reserve = 0,
3716                 }
3717         };
3718         struct mlx5_flow_cb_ctx ctx = {
3719                 .error = error,
3720                 .data = resource,
3721         };
3722         struct mlx5_hlist *encaps_decaps;
3723         uint64_t key64;
3724
3725         encaps_decaps = flow_dv_hlist_prepare(sh, &sh->encaps_decaps,
3726                                 "encaps_decaps",
3727                                 MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
3728                                 true, true, sh,
3729                                 flow_dv_encap_decap_create_cb,
3730                                 flow_dv_encap_decap_match_cb,
3731                                 flow_dv_encap_decap_remove_cb,
3732                                 flow_dv_encap_decap_clone_cb,
3733                                 flow_dv_encap_decap_clone_free_cb,
3734                                 error);
3735         if (unlikely(!encaps_decaps))
3736                 return -rte_errno;
3737         resource->flags = dev_flow->dv.group ? 0 : 1;
3738         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3739                                 sizeof(encap_decap_key.v32), 0);
3740         if (resource->reformat_type !=
3741             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3742             resource->size)
3743                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3744         entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
3745         if (!entry)
3746                 return -rte_errno;
3747         resource = container_of(entry, typeof(*resource), entry);
3748         dev_flow->dv.encap_decap = resource;
3749         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3750         return 0;
3751 }
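
/*
 * Editorial sketch (restating the key scheme above, not additional code):
 * the 64-bit lookup key folds the compact {ft_type, refmt_type, is_root}
 * tuple first and then, for reformats that carry header data, the raw
 * buffer:
 *
 *   key64 = __rte_raw_cksum(&key.v32, sizeof(key.v32), 0);
 *   if (type != L2_TUNNEL_TO_L2 && size)
 *           key64 = __rte_raw_cksum(buf, size, key64);
 *
 * Plain L2 decap (L2_TUNNEL_TO_L2) has no payload, so its key depends on
 * the attributes alone; exact equality is still confirmed by
 * flow_dv_encap_decap_match_cb() before an entry is reused.
 */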
3752
3753 /**
3754  * Find existing table jump resource or create and register a new one.
3755  *
3756  * @param[in, out] dev
3757  *   Pointer to rte_eth_dev structure.
3758  * @param[in, out] tbl
3759  *   Pointer to flow table resource.
3760  * @param[in, out] dev_flow
3761  *   Pointer to the dev_flow.
3762  * @param[out] error
3763  *   Pointer to the error structure.
3764  *
3765  * @return
3766  *   0 on success, a negative errno value otherwise and rte_errno is set.
3767  */
3768 static int
3769 flow_dv_jump_tbl_resource_register
3770                         (struct rte_eth_dev *dev __rte_unused,
3771                          struct mlx5_flow_tbl_resource *tbl,
3772                          struct mlx5_flow *dev_flow,
3773                          struct rte_flow_error *error __rte_unused)
3774 {
3775         struct mlx5_flow_tbl_data_entry *tbl_data =
3776                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3777
3778         MLX5_ASSERT(tbl);
3779         MLX5_ASSERT(tbl_data->jump.action);
3780         dev_flow->handle->rix_jump = tbl_data->idx;
3781         dev_flow->dv.jump = &tbl_data->jump;
3782         return 0;
3783 }
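
/*
 * Editorial note: unlike the other *_resource_register() helpers, nothing
 * new is created here. The DR jump action is allocated together with the
 * destination table entry (hence the MLX5_ASSERT on tbl_data->jump.action),
 * so registering a jump only stores the existing table reference in the
 * flow handle.
 */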
3784
3785 int
3786 flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
3787                          struct mlx5_list_entry *entry, void *cb_ctx)
3788 {
3789         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3790         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3791         struct mlx5_flow_dv_port_id_action_resource *res =
3792                                        container_of(entry, typeof(*res), entry);
3793
3794         return ref->port_id != res->port_id;
3795 }
3796
3797 struct mlx5_list_entry *
3798 flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
3799 {
3800         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3801         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3802         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3803         struct mlx5_flow_dv_port_id_action_resource *resource;
3804         uint32_t idx;
3805         int ret;
3806
3807         /* Register new port id action resource. */
3808         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3809         if (!resource) {
3810                 rte_flow_error_set(ctx->error, ENOMEM,
3811                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3812                                    "cannot allocate port_id action memory");
3813                 return NULL;
3814         }
3815         *resource = *ref;
3816         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3817                                                         ref->port_id,
3818                                                         &resource->action);
3819         if (ret) {
3820                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3821                 rte_flow_error_set(ctx->error, ENOMEM,
3822                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3823                                    "cannot create action");
3824                 return NULL;
3825         }
3826         resource->idx = idx;
3827         return &resource->entry;
3828 }
3829
3830 struct mlx5_list_entry *
3831 flow_dv_port_id_clone_cb(void *tool_ctx,
3832                          struct mlx5_list_entry *entry __rte_unused,
3833                          void *cb_ctx)
3834 {
3835         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3836         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3837         struct mlx5_flow_dv_port_id_action_resource *resource;
3838         uint32_t idx;
3839
3840         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3841         if (!resource) {
3842                 rte_flow_error_set(ctx->error, ENOMEM,
3843                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3844                                    "cannot allocate port_id action memory");
3845                 return NULL;
3846         }
3847         memcpy(resource, entry, sizeof(*resource));
3848         resource->idx = idx;
3849         return &resource->entry;
3850 }
3851
3852 void
3853 flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3854 {
3855         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3856         struct mlx5_flow_dv_port_id_action_resource *resource =
3857                                   container_of(entry, typeof(*resource), entry);
3858
3859         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3860 }
3861
3862 /**
3863  * Find existing table port ID resource or create and register a new one.
3864  *
3865  * @param[in, out] dev
3866  *   Pointer to rte_eth_dev structure.
3867  * @param[in, out] ref
3868  *   Pointer to port ID action resource reference.
3869  * @param[in, out] dev_flow
3870  *   Pointer to the dev_flow.
3871  * @param[out] error
3872  *   Pointer to the error structure.
3873  *
3874  * @return
3875  *   0 on success, a negative errno value otherwise and rte_errno is set.
3876  */
3877 static int
3878 flow_dv_port_id_action_resource_register
3879                         (struct rte_eth_dev *dev,
3880                          struct mlx5_flow_dv_port_id_action_resource *ref,
3881                          struct mlx5_flow *dev_flow,
3882                          struct rte_flow_error *error)
3883 {
3884         struct mlx5_priv *priv = dev->data->dev_private;
3885         struct mlx5_list_entry *entry;
3886         struct mlx5_flow_dv_port_id_action_resource *resource;
3887         struct mlx5_flow_cb_ctx ctx = {
3888                 .error = error,
3889                 .data = ref,
3890         };
3891
3892         entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
3893         if (!entry)
3894                 return -rte_errno;
3895         resource = container_of(entry, typeof(*resource), entry);
3896         dev_flow->dv.port_id_action = resource;
3897         dev_flow->handle->rix_port_id_action = resource->idx;
3898         return 0;
3899 }
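
/*
 * Usage sketch (hypothetical values, editorial): registering a port_id
 * action for a transfer rule forwarding to E-Switch port 5 could read:
 *
 *   struct mlx5_flow_dv_port_id_action_resource ref = { .port_id = 5 };
 *
 *   if (flow_dv_port_id_action_resource_register(dev, &ref, dev_flow,
 *                                                 error))
 *           return -rte_errno;
 *
 * after which dev_flow->dv.port_id_action->action can be attached to the
 * rule. Requests with an equal port_id converge on one shared DR action
 * thanks to flow_dv_port_id_match_cb().
 */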
3900
3901 int
3902 flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
3903                            struct mlx5_list_entry *entry, void *cb_ctx)
3904 {
3905         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3906         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3907         struct mlx5_flow_dv_push_vlan_action_resource *res =
3908                                        container_of(entry, typeof(*res), entry);
3909
3910         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3911 }
3912
3913 struct mlx5_list_entry *
3914 flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
3915 {
3916         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3917         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3918         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3919         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3920         struct mlx5dv_dr_domain *domain;
3921         uint32_t idx;
3922         int ret;
3923
3924         /* Register new push_vlan action resource. */
3925         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3926         if (!resource) {
3927                 rte_flow_error_set(ctx->error, ENOMEM,
3928                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3929                                    "cannot allocate push_vlan action memory");
3930                 return NULL;
3931         }
3932         *resource = *ref;
3933         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3934                 domain = sh->fdb_domain;
3935         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3936                 domain = sh->rx_domain;
3937         else
3938                 domain = sh->tx_domain;
3939         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3940                                                         &resource->action);
3941         if (ret) {
3942                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3943                 rte_flow_error_set(ctx->error, ENOMEM,
3944                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3945                                    "cannot create push vlan action");
3946                 return NULL;
3947         }
3948         resource->idx = idx;
3949         return &resource->entry;
3950 }
3951
3952 struct mlx5_list_entry *
3953 flow_dv_push_vlan_clone_cb(void *tool_ctx,
3954                            struct mlx5_list_entry *entry __rte_unused,
3955                            void *cb_ctx)
3956 {
3957         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3958         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3959         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3960         uint32_t idx;
3961
3962         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3963         if (!resource) {
3964                 rte_flow_error_set(ctx->error, ENOMEM,
3965                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3966                                    "cannot allocate push_vlan action memory");
3967                 return NULL;
3968         }
3969         memcpy(resource, entry, sizeof(*resource));
3970         resource->idx = idx;
3971         return &resource->entry;
3972 }
3973
3974 void
3975 flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3976 {
3977         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3978         struct mlx5_flow_dv_push_vlan_action_resource *resource =
3979                                   container_of(entry, typeof(*resource), entry);
3980
3981         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
3982 }
3983
3984 /**
3985  * Find existing push vlan resource or create and register a new one.
3986  *
3987  * @param[in, out] dev
3988  *   Pointer to rte_eth_dev structure.
3989  * @param[in, out] ref
3990  *   Pointer to push VLAN action resource reference.
3991  * @param[in, out] dev_flow
3992  *   Pointer to the dev_flow.
3993  * @param[out] error
3994  *   Pointer to the error structure.
3995  *
3996  * @return
3997  *   0 on success, a negative errno value otherwise and rte_errno is set.
3998  */
3999 static int
4000 flow_dv_push_vlan_action_resource_register
4001                        (struct rte_eth_dev *dev,
4002                         struct mlx5_flow_dv_push_vlan_action_resource *ref,
4003                         struct mlx5_flow *dev_flow,
4004                         struct rte_flow_error *error)
4005 {
4006         struct mlx5_priv *priv = dev->data->dev_private;
4007         struct mlx5_flow_dv_push_vlan_action_resource *resource;
4008         struct mlx5_list_entry *entry;
4009         struct mlx5_flow_cb_ctx ctx = {
4010                 .error = error,
4011                 .data = ref,
4012         };
4013
4014         entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
4015         if (!entry)
4016                 return -rte_errno;
4017         resource = container_of(entry, typeof(*resource), entry);
4018
4019         dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4020         dev_flow->dv.push_vlan_res = resource;
4021         return 0;
4022 }
4023
4024 /**
4025  * Get the header size of the specified rte_flow_item_type.
4026  *
4027  * @param[in] item_type
4028  *   Tested rte_flow_item_type.
4029  *
4030  * @return
4031  *   Size of the item type header, 0 if void or irrelevant.
4032  */
4033 static size_t
4034 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4035 {
4036         size_t retval;
4037
4038         switch (item_type) {
4039         case RTE_FLOW_ITEM_TYPE_ETH:
4040                 retval = sizeof(struct rte_ether_hdr);
4041                 break;
4042         case RTE_FLOW_ITEM_TYPE_VLAN:
4043                 retval = sizeof(struct rte_vlan_hdr);
4044                 break;
4045         case RTE_FLOW_ITEM_TYPE_IPV4:
4046                 retval = sizeof(struct rte_ipv4_hdr);
4047                 break;
4048         case RTE_FLOW_ITEM_TYPE_IPV6:
4049                 retval = sizeof(struct rte_ipv6_hdr);
4050                 break;
4051         case RTE_FLOW_ITEM_TYPE_UDP:
4052                 retval = sizeof(struct rte_udp_hdr);
4053                 break;
4054         case RTE_FLOW_ITEM_TYPE_TCP:
4055                 retval = sizeof(struct rte_tcp_hdr);
4056                 break;
4057         case RTE_FLOW_ITEM_TYPE_VXLAN:
4058         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4059                 retval = sizeof(struct rte_vxlan_hdr);
4060                 break;
4061         case RTE_FLOW_ITEM_TYPE_GRE:
4062         case RTE_FLOW_ITEM_TYPE_NVGRE:
4063                 retval = sizeof(struct rte_gre_hdr);
4064                 break;
4065         case RTE_FLOW_ITEM_TYPE_MPLS:
4066                 retval = sizeof(struct rte_mpls_hdr);
4067                 break;
4068         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4069         default:
4070                 retval = 0;
4071                 break;
4072         }
4073         return retval;
4074 }
4075
4076 #define MLX5_ENCAP_IPV4_VERSION         0x40
4077 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
4078 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
4079 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
4080 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
4081 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
4082 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
4083
4084 /**
4085  * Convert the encap action data from a list of rte_flow_item to a raw buffer
4086  *
4087  * @param[in] items
4088  *   Pointer to rte_flow_item objects list.
4089  * @param[out] buf
4090  *   Pointer to the output buffer.
4091  * @param[out] size
4092  *   Pointer to the output buffer size.
4093  * @param[out] error
4094  *   Pointer to the error structure.
4095  *
4096  * @return
4097  *   0 on success, a negative errno value otherwise and rte_errno is set.
4098  */
4099 static int
4100 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4101                            size_t *size, struct rte_flow_error *error)
4102 {
4103         struct rte_ether_hdr *eth = NULL;
4104         struct rte_vlan_hdr *vlan = NULL;
4105         struct rte_ipv4_hdr *ipv4 = NULL;
4106         struct rte_ipv6_hdr *ipv6 = NULL;
4107         struct rte_udp_hdr *udp = NULL;
4108         struct rte_vxlan_hdr *vxlan = NULL;
4109         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4110         struct rte_gre_hdr *gre = NULL;
4111         size_t len;
4112         size_t temp_size = 0;
4113
4114         if (!items)
4115                 return rte_flow_error_set(error, EINVAL,
4116                                           RTE_FLOW_ERROR_TYPE_ACTION,
4117                                           NULL, "invalid empty data");
4118         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4119                 len = flow_dv_get_item_hdr_len(items->type);
4120                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4121                         return rte_flow_error_set(error, EINVAL,
4122                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4123                                                   (void *)items->type,
4124                                                   "items total size is too big"
4125                                                   " for encap action");
4126                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4127                 switch (items->type) {
4128                 case RTE_FLOW_ITEM_TYPE_ETH:
4129                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4130                         break;
4131                 case RTE_FLOW_ITEM_TYPE_VLAN:
4132                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4133                         if (!eth)
4134                                 return rte_flow_error_set(error, EINVAL,
4135                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4136                                                 (void *)items->type,
4137                                                 "eth header not found");
4138                         if (!eth->ether_type)
4139                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4140                         break;
4141                 case RTE_FLOW_ITEM_TYPE_IPV4:
4142                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4143                         if (!vlan && !eth)
4144                                 return rte_flow_error_set(error, EINVAL,
4145                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4146                                                 (void *)items->type,
4147                                                 "neither eth nor vlan"
4148                                                 " header found");
4149                         if (vlan && !vlan->eth_proto)
4150                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4151                         else if (eth && !eth->ether_type)
4152                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4153                         if (!ipv4->version_ihl)
4154                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4155                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4156                         if (!ipv4->time_to_live)
4157                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4158                         break;
4159                 case RTE_FLOW_ITEM_TYPE_IPV6:
4160                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4161                         if (!vlan && !eth)
4162                                 return rte_flow_error_set(error, EINVAL,
4163                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4164                                                 (void *)items->type,
4165                                                 "neither eth nor vlan"
4166                                                 " header found");
4167                         if (vlan && !vlan->eth_proto)
4168                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4169                         else if (eth && !eth->ether_type)
4170                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4171                         if (!ipv6->vtc_flow)
4172                                 ipv6->vtc_flow =
4173                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4174                         if (!ipv6->hop_limits)
4175                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4176                         break;
4177                 case RTE_FLOW_ITEM_TYPE_UDP:
4178                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4179                         if (!ipv4 && !ipv6)
4180                                 return rte_flow_error_set(error, EINVAL,
4181                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4182                                                 (void *)items->type,
4183                                                 "ip header not found");
4184                         if (ipv4 && !ipv4->next_proto_id)
4185                                 ipv4->next_proto_id = IPPROTO_UDP;
4186                         else if (ipv6 && !ipv6->proto)
4187                                 ipv6->proto = IPPROTO_UDP;
4188                         break;
4189                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4190                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4191                         if (!udp)
4192                                 return rte_flow_error_set(error, EINVAL,
4193                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4194                                                 (void *)items->type,
4195                                                 "udp header not found");
4196                         if (!udp->dst_port)
4197                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4198                         if (!vxlan->vx_flags)
4199                                 vxlan->vx_flags =
4200                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4201                         break;
4202                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4203                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4204                         if (!udp)
4205                                 return rte_flow_error_set(error, EINVAL,
4206                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4207                                                 (void *)items->type,
4208                                                 "udp header not found");
4209                         if (!vxlan_gpe->proto)
4210                                 return rte_flow_error_set(error, EINVAL,
4211                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4212                                                 (void *)items->type,
4213                                                 "next protocol not found");
4214                         if (!udp->dst_port)
4215                                 udp->dst_port =
4216                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4217                         if (!vxlan_gpe->vx_flags)
4218                                 vxlan_gpe->vx_flags =
4219                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4220                         break;
4221                 case RTE_FLOW_ITEM_TYPE_GRE:
4222                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4223                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4224                         if (!gre->proto)
4225                                 return rte_flow_error_set(error, EINVAL,
4226                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4227                                                 (void *)items->type,
4228                                                 "next protocol not found");
4229                         if (!ipv4 && !ipv6)
4230                                 return rte_flow_error_set(error, EINVAL,
4231                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4232                                                 (void *)items->type,
4233                                                 "ip header not found");
4234                         if (ipv4 && !ipv4->next_proto_id)
4235                                 ipv4->next_proto_id = IPPROTO_GRE;
4236                         else if (ipv6 && !ipv6->proto)
4237                                 ipv6->proto = IPPROTO_GRE;
4238                         break;
4239                 case RTE_FLOW_ITEM_TYPE_VOID:
4240                         break;
4241                 default:
4242                         return rte_flow_error_set(error, EINVAL,
4243                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4244                                                   (void *)items->type,
4245                                                   "unsupported item type");
4246                         break;
4247                 }
4248                 temp_size += len;
4249         }
4250         *size = temp_size;
4251         return 0;
4252 }
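
/*
 * Worked example (hypothetical pattern, editorial): for a VXLAN
 * encapsulation described as ETH / IPV4 / UDP / VXLAN / END with only the
 * addresses and the VNI filled in, this routine emits a 50-byte header
 * (14 + 20 + 8 + 8) and backfills the zeroed fields:
 *
 *   eth->ether_type     = RTE_BE16(RTE_ETHER_TYPE_IPV4);
 *   ipv4->version_ihl   = 0x45;
 *   ipv4->time_to_live  = 64;
 *   ipv4->next_proto_id = IPPROTO_UDP;
 *   udp->dst_port       = RTE_BE16(MLX5_UDP_PORT_VXLAN);
 *   vxlan->vx_flags     = RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
 */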
4253
4254 static int
4255 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4256 {
4257         struct rte_ether_hdr *eth = NULL;
4258         struct rte_vlan_hdr *vlan = NULL;
4259         struct rte_ipv6_hdr *ipv6 = NULL;
4260         struct rte_udp_hdr *udp = NULL;
4261         char *next_hdr;
4262         uint16_t proto;
4263
4264         eth = (struct rte_ether_hdr *)data;
4265         next_hdr = (char *)(eth + 1);
4266         proto = RTE_BE16(eth->ether_type);
4267
4268         /* VLAN skipping */
4269         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4270                 vlan = (struct rte_vlan_hdr *)next_hdr;
4271                 proto = RTE_BE16(vlan->eth_proto);
4272                 next_hdr += sizeof(struct rte_vlan_hdr);
4273         }
4274
4275         /* HW calculates the IPv4 checksum. No need to proceed. */
4276         if (proto == RTE_ETHER_TYPE_IPV4)
4277                 return 0;
4278
4279         /* Non-IPv4/IPv6 header. Not supported. */
4280         if (proto != RTE_ETHER_TYPE_IPV6) {
4281                 return rte_flow_error_set(error, ENOTSUP,
4282                                           RTE_FLOW_ERROR_TYPE_ACTION,
4283                                           NULL, "Cannot offload non IPv4/IPv6");
4284         }
4285
4286         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4287
4288         /* Ignore non-UDP. */
4289         if (ipv6->proto != IPPROTO_UDP)
4290                 return 0;
4291
4292         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4293         udp->dgram_cksum = 0;
4294
4295         return 0;
4296 }
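
/*
 * Editorial rationale (assumption inferred from the logic above): the
 * device recomputes the IPv4 header checksum of the pushed header, but
 * there is no equivalent offload for the UDP checksum over IPv6, so it is
 * forced to zero here; a zero UDP checksum is permitted for tunnel
 * encapsulations over IPv6 (see RFC 6935/6936).
 */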
4297
4298 /**
4299  * Convert L2 encap action to DV specification.
4300  *
4301  * @param[in] dev
4302  *   Pointer to rte_eth_dev structure.
4303  * @param[in] action
4304  *   Pointer to action structure.
4305  * @param[in, out] dev_flow
4306  *   Pointer to the mlx5_flow.
4307  * @param[in] transfer
4308  *   Mark if the flow is E-Switch flow.
4309  * @param[out] error
4310  *   Pointer to the error structure.
4311  *
4312  * @return
4313  *   0 on success, a negative errno value otherwise and rte_errno is set.
4314  */
4315 static int
4316 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4317                                const struct rte_flow_action *action,
4318                                struct mlx5_flow *dev_flow,
4319                                uint8_t transfer,
4320                                struct rte_flow_error *error)
4321 {
4322         const struct rte_flow_item *encap_data;
4323         const struct rte_flow_action_raw_encap *raw_encap_data;
4324         struct mlx5_flow_dv_encap_decap_resource res = {
4325                 .reformat_type =
4326                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4327                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4328                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4329         };
4330
4331         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4332                 raw_encap_data =
4333                         (const struct rte_flow_action_raw_encap *)action->conf;
4334                 res.size = raw_encap_data->size;
4335                 memcpy(res.buf, raw_encap_data->data, res.size);
4336         } else {
4337                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4338                         encap_data =
4339                                 ((const struct rte_flow_action_vxlan_encap *)
4340                                                 action->conf)->definition;
4341                 else
4342                         encap_data =
4343                                 ((const struct rte_flow_action_nvgre_encap *)
4344                                                 action->conf)->definition;
4345                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4346                                                &res.size, error))
4347                         return -rte_errno;
4348         }
4349         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4350                 return -rte_errno;
4351         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4352                 return rte_flow_error_set(error, EINVAL,
4353                                           RTE_FLOW_ERROR_TYPE_ACTION,
4354                                           NULL, "can't create L2 encap action");
4355         return 0;
4356 }
4357
4358 /**
4359  * Convert L2 decap action to DV specification.
4360  *
4361  * @param[in] dev
4362  *   Pointer to rte_eth_dev structure.
4363  * @param[in, out] dev_flow
4364  *   Pointer to the mlx5_flow.
4365  * @param[in] transfer
4366  *   Mark if the flow is E-Switch flow.
4367  * @param[out] error
4368  *   Pointer to the error structure.
4369  *
4370  * @return
4371  *   0 on success, a negative errno value otherwise and rte_errno is set.
4372  */
4373 static int
4374 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4375                                struct mlx5_flow *dev_flow,
4376                                uint8_t transfer,
4377                                struct rte_flow_error *error)
4378 {
4379         struct mlx5_flow_dv_encap_decap_resource res = {
4380                 .size = 0,
4381                 .reformat_type =
4382                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4383                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4384                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4385         };
4386
4387         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4388                 return rte_flow_error_set(error, EINVAL,
4389                                           RTE_FLOW_ERROR_TYPE_ACTION,
4390                                           NULL, "can't create L2 decap action");
4391         return 0;
4392 }
4393
4394 /**
4395  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4396  *
4397  * @param[in] dev
4398  *   Pointer to rte_eth_dev structure.
4399  * @param[in] action
4400  *   Pointer to action structure.
4401  * @param[in, out] dev_flow
4402  *   Pointer to the mlx5_flow.
4403  * @param[in] attr
4404  *   Pointer to the flow attributes.
4405  * @param[out] error
4406  *   Pointer to the error structure.
4407  *
4408  * @return
4409  *   0 on success, a negative errno value otherwise and rte_errno is set.
4410  */
4411 static int
4412 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4413                                 const struct rte_flow_action *action,
4414                                 struct mlx5_flow *dev_flow,
4415                                 const struct rte_flow_attr *attr,
4416                                 struct rte_flow_error *error)
4417 {
4418         const struct rte_flow_action_raw_encap *encap_data;
4419         struct mlx5_flow_dv_encap_decap_resource res;
4420
4421         memset(&res, 0, sizeof(res));
4422         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4423         res.size = encap_data->size;
4424         memcpy(res.buf, encap_data->data, res.size);
4425         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4426                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4427                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4428         if (attr->transfer)
4429                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4430         else
4431                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4432                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4433         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4434                 return rte_flow_error_set(error, EINVAL,
4435                                           RTE_FLOW_ERROR_TYPE_ACTION,
4436                                           NULL, "can't create encap action");
4437         return 0;
4438 }
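
/*
 * Editorial note (interpretation of the size test above): a buffer shorter
 * than MLX5_ENCAPSULATION_DECISION_SIZE can only hold a bare L2 header, so
 * the action is programmed as an L3-tunnel-to-L2 decap that rewrites the
 * exposed L2 header; a buffer at least that long carries a complete tunnel
 * header and selects the L2-to-L3-tunnel encap instead. The table type
 * (FDB/Tx/Rx) then follows the flow attributes.
 */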
4439
4440 /**
4441  * Create action push VLAN.
4442  *
4443  * @param[in] dev
4444  *   Pointer to rte_eth_dev structure.
4445  * @param[in] attr
4446  *   Pointer to the flow attributes.
4447  * @param[in] vlan
4448  *   Pointer to the vlan to push to the Ethernet header.
4449  * @param[in, out] dev_flow
4450  *   Pointer to the mlx5_flow.
4451  * @param[out] error
4452  *   Pointer to the error structure.
4453  *
4454  * @return
4455  *   0 on success, a negative errno value otherwise and rte_errno is set.
4456  */
4457 static int
4458 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4459                                 const struct rte_flow_attr *attr,
4460                                 const struct rte_vlan_hdr *vlan,
4461                                 struct mlx5_flow *dev_flow,
4462                                 struct rte_flow_error *error)
4463 {
4464         struct mlx5_flow_dv_push_vlan_action_resource res;
4465
4466         memset(&res, 0, sizeof(res));
4467         res.vlan_tag =
4468                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4469                                  vlan->vlan_tci);
4470         if (attr->transfer)
4471                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4472         else
4473                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4474                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4475         return flow_dv_push_vlan_action_resource_register
4476                                             (dev, &res, dev_flow, error);
4477 }
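
/*
 * Worked example (editorial): for TPID 0x8100, PCP 3 and VLAN ID 100 the
 * 32-bit tag handed to hardware is built as
 *
 *   vlan_tci = (3 << 13) | 100 = 0x6064
 *   vlan_tag = rte_cpu_to_be_32(0x8100 << 16 | 0x6064)
 *
 * i.e. big-endian 0x81006064: the TPID in the upper half-word and the full
 * TCI (PCP/DEI/VID) in the lower one.
 */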
4478
4479 /**
4480  * Validate the modify-header actions.
4481  *
4482  * @param[in] action_flags
4483  *   Holds the actions detected until now.
4484  * @param[in] action
4485  *   Pointer to the modify action.
4486  * @param[out] error
4487  *   Pointer to error structure.
4488  *
4489  * @return
4490  *   0 on success, a negative errno value otherwise and rte_errno is set.
4491  */
4492 static int
4493 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4494                                    const struct rte_flow_action *action,
4495                                    struct rte_flow_error *error)
4496 {
4497         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4498                 return rte_flow_error_set(error, EINVAL,
4499                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4500                                           NULL, "action configuration not set");
4501         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4502                 return rte_flow_error_set(error, EINVAL,
4503                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4504                                           "can't have encap action before"
4505                                           " modify action");
4506         return 0;
4507 }
4508
4509 /**
4510  * Validate the modify-header MAC address actions.
4511  *
4512  * @param[in] action_flags
4513  *   Holds the actions detected until now.
4514  * @param[in] action
4515  *   Pointer to the modify action.
4516  * @param[in] item_flags
4517  *   Holds the items detected.
4518  * @param[out] error
4519  *   Pointer to error structure.
4520  *
4521  * @return
4522  *   0 on success, a negative errno value otherwise and rte_errno is set.
4523  */
4524 static int
4525 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4526                                    const struct rte_flow_action *action,
4527                                    const uint64_t item_flags,
4528                                    struct rte_flow_error *error)
4529 {
4530         int ret = 0;
4531
4532         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4533         if (!ret) {
4534                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4535                         return rte_flow_error_set(error, EINVAL,
4536                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4537                                                   NULL,
4538                                                   "no L2 item in pattern");
4539         }
4540         return ret;
4541 }
4542
4543 /**
4544  * Validate the modify-header IPv4 address actions.
4545  *
4546  * @param[in] action_flags
4547  *   Holds the actions detected until now.
4548  * @param[in] action
4549  *   Pointer to the modify action.
4550  * @param[in] item_flags
4551  *   Holds the items detected.
4552  * @param[out] error
4553  *   Pointer to error structure.
4554  *
4555  * @return
4556  *   0 on success, a negative errno value otherwise and rte_errno is set.
4557  */
4558 static int
4559 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4560                                     const struct rte_flow_action *action,
4561                                     const uint64_t item_flags,
4562                                     struct rte_flow_error *error)
4563 {
4564         int ret = 0;
4565         uint64_t layer;
4566
4567         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4568         if (!ret) {
4569                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4570                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4571                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4572                 if (!(item_flags & layer))
4573                         return rte_flow_error_set(error, EINVAL,
4574                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4575                                                   NULL,
4576                                                   "no ipv4 item in pattern");
4577         }
4578         return ret;
4579 }
4580
4581 /**
4582  * Validate the modify-header IPv6 address actions.
4583  *
4584  * @param[in] action_flags
4585  *   Holds the actions detected until now.
4586  * @param[in] action
4587  *   Pointer to the modify action.
4588  * @param[in] item_flags
4589  *   Holds the items detected.
4590  * @param[out] error
4591  *   Pointer to error structure.
4592  *
4593  * @return
4594  *   0 on success, a negative errno value otherwise and rte_errno is set.
4595  */
4596 static int
4597 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4598                                     const struct rte_flow_action *action,
4599                                     const uint64_t item_flags,
4600                                     struct rte_flow_error *error)
4601 {
4602         int ret = 0;
4603         uint64_t layer;
4604
4605         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4606         if (!ret) {
4607                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4608                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4609                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4610                 if (!(item_flags & layer))
4611                         return rte_flow_error_set(error, EINVAL,
4612                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4613                                                   NULL,
4614                                                   "no ipv6 item in pattern");
4615         }
4616         return ret;
4617 }
4618
4619 /**
4620  * Validate the modify-header TP actions.
4621  *
4622  * @param[in] action_flags
4623  *   Holds the actions detected until now.
4624  * @param[in] action
4625  *   Pointer to the modify action.
4626  * @param[in] item_flags
4627  *   Holds the items detected.
4628  * @param[out] error
4629  *   Pointer to error structure.
4630  *
4631  * @return
4632  *   0 on success, a negative errno value otherwise and rte_errno is set.
4633  */
4634 static int
4635 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4636                                   const struct rte_flow_action *action,
4637                                   const uint64_t item_flags,
4638                                   struct rte_flow_error *error)
4639 {
4640         int ret = 0;
4641         uint64_t layer;
4642
4643         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4644         if (!ret) {
4645                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4646                                  MLX5_FLOW_LAYER_INNER_L4 :
4647                                  MLX5_FLOW_LAYER_OUTER_L4;
4648                 if (!(item_flags & layer))
4649                         return rte_flow_error_set(error, EINVAL,
4650                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4651                                                   NULL, "no transport layer "
4652                                                   "in pattern");
4653         }
4654         return ret;
4655 }
4656
4657 /**
4658  * Validate the modify-header actions of increment/decrement
4659  * TCP Sequence-number.
4660  *
4661  * @param[in] action_flags
4662  *   Holds the actions detected until now.
4663  * @param[in] action
4664  *   Pointer to the modify action.
4665  * @param[in] item_flags
4666  *   Holds the items detected.
4667  * @param[out] error
4668  *   Pointer to error structure.
4669  *
4670  * @return
4671  *   0 on success, a negative errno value otherwise and rte_errno is set.
4672  */
4673 static int
4674 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4675                                        const struct rte_flow_action *action,
4676                                        const uint64_t item_flags,
4677                                        struct rte_flow_error *error)
4678 {
4679         int ret = 0;
4680         uint64_t layer;
4681
4682         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4683         if (!ret) {
4684                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4685                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4686                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4687                 if (!(item_flags & layer))
4688                         return rte_flow_error_set(error, EINVAL,
4689                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4690                                                   NULL, "no TCP item in"
4691                                                   " pattern");
4692                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4693                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4694                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4695                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4696                         return rte_flow_error_set(error, EINVAL,
4697                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4698                                                   NULL,
4699                                                   "cannot decrease and increase"
4700                                                   " TCP sequence number"
4701                                                   " at the same time");
4702         }
4703         return ret;
4704 }
4705
4706 /**
4707  * Validate the modify-header actions of increment/decrement
4708  * TCP Acknowledgment number.
4709  *
4710  * @param[in] action_flags
4711  *   Holds the actions detected until now.
4712  * @param[in] action
4713  *   Pointer to the modify action.
4714  * @param[in] item_flags
4715  *   Holds the items detected.
4716  * @param[out] error
4717  *   Pointer to error structure.
4718  *
4719  * @return
4720  *   0 on success, a negative errno value otherwise and rte_errno is set.
4721  */
4722 static int
4723 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4724                                        const struct rte_flow_action *action,
4725                                        const uint64_t item_flags,
4726                                        struct rte_flow_error *error)
4727 {
4728         int ret = 0;
4729         uint64_t layer;
4730
4731         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4732         if (!ret) {
4733                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4734                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4735                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4736                 if (!(item_flags & layer))
4737                         return rte_flow_error_set(error, EINVAL,
4738                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4739                                                   NULL, "no TCP item in"
4740                                                   " pattern");
4741                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4742                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4743                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4744                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4745                         return rte_flow_error_set(error, EINVAL,
4746                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4747                                                   NULL,
4748                                                   "cannot decrease and increase"
4749                                                   " TCP acknowledgment number"
4750                                                   " at the same time");
4751         }
4752         return ret;
4753 }
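
/*
 * Editorial note: the two validators above enforce that a single flow may
 * adjust each TCP number in one direction only. For example, combining
 * INC_TCP_SEQ with DEC_TCP_SEQ is rejected, while INC_TCP_SEQ together
 * with DEC_TCP_ACK is accepted, since the sequence and acknowledgment
 * numbers each keep a single consistent adjustment direction.
 */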
4754
4755 /**
4756  * Validate the modify-header TTL actions.
4757  *
4758  * @param[in] action_flags
4759  *   Holds the actions detected until now.
4760  * @param[in] action
4761  *   Pointer to the modify action.
4762  * @param[in] item_flags
4763  *   Holds the items detected.
4764  * @param[out] error
4765  *   Pointer to error structure.
4766  *
4767  * @return
4768  *   0 on success, a negative errno value otherwise and rte_errno is set.
4769  */
4770 static int
4771 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4772                                    const struct rte_flow_action *action,
4773                                    const uint64_t item_flags,
4774                                    struct rte_flow_error *error)
4775 {
4776         int ret = 0;
4777         uint64_t layer;
4778
4779         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4780         if (!ret) {
4781                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4782                                  MLX5_FLOW_LAYER_INNER_L3 :
4783                                  MLX5_FLOW_LAYER_OUTER_L3;
4784                 if (!(item_flags & layer))
4785                         return rte_flow_error_set(error, EINVAL,
4786                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4787                                                   NULL,
4788                                                   "no IP protocol in pattern");
4789         }
4790         return ret;
4791 }
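
/*
 * Editorial note: most of the validators above share one pattern. Once a
 * DECAP action has been seen, subsequent header modifications apply to
 * what will then be the outermost (currently inner) headers, so the layer
 * check switches from the MLX5_FLOW_LAYER_OUTER_* to the corresponding
 * MLX5_FLOW_LAYER_INNER_* item flags.
 */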
4792
4793 /**
4794  * Validate the generic modify field actions.
 *
4795  * @param[in] dev
4796  *   Pointer to the rte_eth_dev structure.
4797  * @param[in] action_flags
4798  *   Holds the actions detected until now.
4799  * @param[in] action
4800  *   Pointer to the modify action.
4801  * @param[in] attr
4802  *   Pointer to the flow attributes.
4803  * @param[out] error
4804  *   Pointer to error structure.
4805  *
4806  * @return
4807  *   Number of header fields to modify (0 or more) on success,
4808  *   a negative errno value otherwise and rte_errno is set.
4809  */
4810 static int
4811 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4812                                    const uint64_t action_flags,
4813                                    const struct rte_flow_action *action,
4814                                    const struct rte_flow_attr *attr,
4815                                    struct rte_flow_error *error)
4816 {
4817         int ret = 0;
4818         struct mlx5_priv *priv = dev->data->dev_private;
4819         struct mlx5_sh_config *config = &priv->sh->config;
4820         const struct rte_flow_action_modify_field *action_modify_field =
4821                 action->conf;
4822         uint32_t dst_width = mlx5_flow_item_field_width(dev,
4823                                 action_modify_field->dst.field,
4824                                 -1, attr, error);
4825         uint32_t src_width = mlx5_flow_item_field_width(dev,
4826                                 action_modify_field->src.field,
4827                                 dst_width, attr, error);
4828
4829         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4830         if (ret)
4831                 return ret;
4832
4833         if (action_modify_field->width == 0)
4834                 return rte_flow_error_set(error, EINVAL,
4835                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4836                                 "no bits are requested to be modified");
4837         else if (action_modify_field->width > dst_width ||
4838                  action_modify_field->width > src_width)
4839                 return rte_flow_error_set(error, EINVAL,
4840                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4841                                 "cannot modify more bits than"
4842                                 " the width of a field");
4843         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4844             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4845                 if ((action_modify_field->dst.offset +
4846                      action_modify_field->width > dst_width) ||
4847                     (action_modify_field->dst.offset % 32))
4848                         return rte_flow_error_set(error, EINVAL,
4849                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4850                                         "destination offset is too big"
4851                                         " or not aligned to 4 bytes");
4852                 if (action_modify_field->dst.level &&
4853                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4854                         return rte_flow_error_set(error, ENOTSUP,
4855                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4856                                         "inner header fields modification"
4857                                         " is not supported");
4858         }
4859         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4860             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4861                 if (!attr->transfer && !attr->group)
4862                         return rte_flow_error_set(error, ENOTSUP,
4863                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4864                                         "modify field action is not"
4865                                         " supported for group 0");
4866                 if ((action_modify_field->src.offset +
4867                      action_modify_field->width > src_width) ||
4868                     (action_modify_field->src.offset % 32))
4869                         return rte_flow_error_set(error, EINVAL,
4870                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4871                                         "source offset is too big"
4872                                         " or not aligned to 4 bytes");
4873                 if (action_modify_field->src.level &&
4874                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4875                         return rte_flow_error_set(error, ENOTSUP,
4876                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4877                                         "inner header fields modification"
4878                                         " is not supported");
4879         }
4880         if ((action_modify_field->dst.field ==
4881              action_modify_field->src.field) &&
4882             (action_modify_field->dst.level ==
4883              action_modify_field->src.level))
4884                 return rte_flow_error_set(error, EINVAL,
4885                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4886                                 "source and destination fields"
4887                                 " cannot be the same");
4888         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4889             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER ||
4890             action_modify_field->dst.field == RTE_FLOW_FIELD_MARK)
4891                 return rte_flow_error_set(error, EINVAL,
4892                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4893                                 "mark, immediate value or a pointer to it"
4894                                 " cannot be used as a destination");
4895         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4896             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4897                 return rte_flow_error_set(error, ENOTSUP,
4898                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4899                                 "modification of an arbitrary"
4900                                 " place in a packet is not supported");
4901         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4902             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4903                 return rte_flow_error_set(error, ENOTSUP,
4904                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4905                                 "modification of the 802.1Q Tag"
4906                                 " Identifier is not supported");
4907         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4908             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4909                 return rte_flow_error_set(error, ENOTSUP,
4910                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4911                                 "modification of the VXLAN Network"
4912                                 " Identifier is not supported");
4913         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4914             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4915                 return rte_flow_error_set(error, ENOTSUP,
4916                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4917                                 "modification of the GENEVE Network"
4918                                 " Identifier is not supported");
4919         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4920             action_modify_field->src.field == RTE_FLOW_FIELD_MARK)
4921                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4922                     !mlx5_flow_ext_mreg_supported(dev))
4923                         return rte_flow_error_set(error, ENOTSUP,
4924                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4925                                         "cannot modify mark in legacy mode"
4926                                         " or without extensive registers");
4927         if (action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4928             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4929                 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
4930                     !mlx5_flow_ext_mreg_supported(dev))
4931                         return rte_flow_error_set(error, ENOTSUP,
4932                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4933                                         "cannot modify meta without"
4934                                         " extensive registers support");
4935                 ret = flow_dv_get_metadata_reg(dev, attr, error);
4936                 if (ret < 0 || ret == REG_NON)
4937                         return rte_flow_error_set(error, ENOTSUP,
4938                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4939                                         "cannot modify meta without"
4940                                         " extensive registers available");
4941         }
4942         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4943                 return rte_flow_error_set(error, ENOTSUP,
4944                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4945                                 "add and sub operations"
4946                                 " are not supported");
4947         return (action_modify_field->width / 32) +
4948                !!(action_modify_field->width % 32);
4949 }
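
/*
 * Illustrative note (not part of the driver): on success the validator
 * above returns the number of 32-bit modify-header commands the action
 * consumes, i.e. ceil(width / 32). A minimal sketch of that arithmetic,
 * using a hypothetical helper name under a hypothetical, never-defined
 * guard:
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static unsigned int
example_modify_field_cmds(uint32_t width)
{
	/* e.g. width = 48 bits -> 48 / 32 + !!(48 % 32) = 2 commands. */
	return (width / 32) + !!(width % 32);
}
#endif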
4950
4951 /**
4952  * Validate jump action.
4953  *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context (may be NULL).
4954  * @param[in] action
4955  *   Pointer to the jump action.
4956  * @param[in] action_flags
4957  *   Holds the actions detected until now.
4958  * @param[in] attributes
4959  *   Pointer to flow attributes
4960  * @param[in] external
4961  *   Action belongs to a flow rule created by a request external to the PMD.
4962  * @param[out] error
4963  *   Pointer to error structure.
4964  *
4965  * @return
4966  *   0 on success, a negative errno value otherwise and rte_errno is set.
4967  */
4968 static int
4969 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4970                              const struct mlx5_flow_tunnel *tunnel,
4971                              const struct rte_flow_action *action,
4972                              uint64_t action_flags,
4973                              const struct rte_flow_attr *attributes,
4974                              bool external, struct rte_flow_error *error)
4975 {
4976         uint32_t target_group, table = 0;
4977         int ret = 0;
4978         struct flow_grp_info grp_info = {
4979                 .external = !!external,
4980                 .transfer = !!attributes->transfer,
4981                 .fdb_def_rule = 1,
4982                 .std_tbl_fix = 0
4983         };
4984         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4985                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4986                 return rte_flow_error_set(error, EINVAL,
4987                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4988                                           "can't have 2 fate actions in"
4989                                           " the same flow");
4990         if (!action->conf)
4991                 return rte_flow_error_set(error, EINVAL,
4992                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4993                                           NULL, "action configuration not set");
4994         target_group =
4995                 ((const struct rte_flow_action_jump *)action->conf)->group;
4996         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4997                                        &grp_info, error);
4998         if (ret)
4999                 return ret;
5000         if (attributes->group == target_group &&
5001             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
5002                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
5003                 return rte_flow_error_set(error, EINVAL,
5004                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5005                                           "target group must be other than"
5006                                           " the current flow group");
5007         if (table == 0)
5008                 return rte_flow_error_set(error, EINVAL,
5009                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5010                                           NULL, "root table shouldn't be destination");
5011         return 0;
5012 }
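
/*
 * Illustrative sketch (not part of the driver): a JUMP configuration that
 * passes the checks above -- non-NULL conf, target group different from the
 * rule's own group, and not the root table. Hypothetical names under a
 * never-defined guard:
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static void
example_jump_action(void)
{
	/* The rule lives in group 1 and jumps to group 2. */
	static const struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
	static const struct rte_flow_action_jump jump = { .group = 2 };
	static const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	(void)attr;
	(void)actions;
}
#endif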
5013
5014 /**
5015  * Validate action PORT_ID / REPRESENTED_PORT.
5016  *
5017  * @param[in] dev
5018  *   Pointer to rte_eth_dev structure.
5019  * @param[in] action_flags
5020  *   Bit-fields that holds the actions detected until now.
5021  * @param[in] action
5022  *   PORT_ID / REPRESENTED_PORT action structure.
5023  * @param[in] attr
5024  *   Attributes of flow that includes this action.
5025  * @param[out] error
5026  *   Pointer to error structure.
5027  *
5028  * @return
5029  *   0 on success, a negative errno value otherwise and rte_errno is set.
5030  */
5031 static int
5032 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5033                                 uint64_t action_flags,
5034                                 const struct rte_flow_action *action,
5035                                 const struct rte_flow_attr *attr,
5036                                 struct rte_flow_error *error)
5037 {
5038         const struct rte_flow_action_port_id *port_id;
5039         const struct rte_flow_action_ethdev *ethdev;
5040         struct mlx5_priv *act_priv;
5041         struct mlx5_priv *dev_priv;
5042         uint16_t port;
5043
5044         if (!attr->transfer)
5045                 return rte_flow_error_set(error, ENOTSUP,
5046                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5047                                           NULL,
5048                                           "port action is valid in transfer"
5049                                           " mode only");
5050         if (!action || !action->conf)
5051                 return rte_flow_error_set(error, ENOTSUP,
5052                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5053                                           NULL,
5054                                           "port action parameters must be"
5055                                           " specified");
5056         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5057                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5058                 return rte_flow_error_set(error, EINVAL,
5059                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5060                                           "can have only one fate action in"
5061                                           " a flow");
5062         dev_priv = mlx5_dev_to_eswitch_info(dev);
5063         if (!dev_priv)
5064                 return rte_flow_error_set(error, rte_errno,
5065                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5066                                           NULL,
5067                                           "failed to obtain E-Switch info");
5068         switch (action->type) {
5069         case RTE_FLOW_ACTION_TYPE_PORT_ID:
5070                 port_id = action->conf;
5071                 port = port_id->original ? dev->data->port_id : port_id->id;
5072                 break;
5073         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5074                 ethdev = action->conf;
5075                 port = ethdev->port_id;
5076                 break;
5077         default:
5078                 MLX5_ASSERT(false);
5079                 return rte_flow_error_set
5080                                 (error, EINVAL,
5081                                  RTE_FLOW_ERROR_TYPE_ACTION, action,
5082                                  "unknown E-Switch action");
5083         }
5084         act_priv = mlx5_port_to_eswitch_info(port, false);
5085         if (!act_priv)
5086                 return rte_flow_error_set
5087                                 (error, rte_errno,
5088                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, action->conf,
5089                                  "failed to obtain E-Switch port id for port");
5090         if (act_priv->domain_id != dev_priv->domain_id)
5091                 return rte_flow_error_set
5092                                 (error, EINVAL,
5093                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5094                                  "port does not belong to"
5095                                  " E-Switch being configured");
5096         return 0;
5097 }
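
/*
 * Illustrative sketch (not part of the driver): the two accepted forms of
 * the port redirection action; both require attr.transfer = 1 and a
 * destination on the same E-Switch domain. Hypothetical names under a
 * never-defined guard:
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static void
example_port_actions(void)
{
	/* Legacy PORT_ID form: redirect to DPDK port 1. */
	static const struct rte_flow_action_port_id pid = { .id = 1 };
	/* Newer REPRESENTED_PORT form with the same destination. */
	static const struct rte_flow_action_ethdev edev = { .port_id = 1 };
	static const struct rte_flow_attr attr = { .transfer = 1 };
	static const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	(void)edev;
	(void)attr;
	(void)actions;
}
#endif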
5098
5099 /**
5100  * Get the maximum number of modify header actions.
5101  *
5102  * @param dev
5103  *   Pointer to rte_eth_dev structure.
5104  * @param root
5105  *   Whether action is on root table.
5106  *
5107  * @return
5108  *   Max number of modify header actions the device can support.
5109  */
5110 static inline unsigned int
5111 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5112                               bool root)
5113 {
5114         /*
5115          * There is no way to query the max capacity directly from FW.
5116          * Assume the maximal value for the root table is supported.
5117          */
5118         if (!root)
5119                 return MLX5_MAX_MODIFY_NUM;
5120         else
5121                 return MLX5_ROOT_TBL_MODIFY_NUM;
5122 }
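
/*
 * Illustrative note (not part of the driver): rules in group 0 land in the
 * root table, which is programmed through the firmware path and therefore
 * honors the smaller MLX5_ROOT_TBL_MODIFY_NUM limit; non-root tables go
 * through SW steering and allow up to MLX5_MAX_MODIFY_NUM commands.
 */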
5123
5124 /**
5125  * Validate the meter action.
5126  *
5127  * @param[in] dev
5128  *   Pointer to rte_eth_dev structure.
5129  * @param[in] action_flags
5130  *   Bit-fields that holds the actions detected until now.
5131  * @param[in] item_flags
5132  *   Holds the items detected.
5133  * @param[in] action
5134  *   Pointer to the meter action.
5135  * @param[in] attr
5136  *   Attributes of flow that includes this action.
5137  * @param[in] port_id_item
5138  *   Pointer to item indicating port id.
5139  * @param[out] error
5140  *   Pointer to error structure.
5141  *
5142  * @return
5143  *   0 on success, a negative errno value otherwise and rte_errno is set.
5144  */
5145 static int
5146 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5147                                 uint64_t action_flags, uint64_t item_flags,
5148                                 const struct rte_flow_action *action,
5149                                 const struct rte_flow_attr *attr,
5150                                 const struct rte_flow_item *port_id_item,
5151                                 bool *def_policy,
5152                                 struct rte_flow_error *error)
5153 {
5154         struct mlx5_priv *priv = dev->data->dev_private;
5155         const struct rte_flow_action_meter *am = action->conf;
5156         struct mlx5_flow_meter_info *fm;
5157         struct mlx5_flow_meter_policy *mtr_policy;
5158         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5159
5160         if (!am)
5161                 return rte_flow_error_set(error, EINVAL,
5162                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5163                                           "meter action conf is NULL");
5164
5165         if (action_flags & MLX5_FLOW_ACTION_METER)
5166                 return rte_flow_error_set(error, ENOTSUP,
5167                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5168                                           "meter chaining not supported");
5169         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5170                 return rte_flow_error_set(error, ENOTSUP,
5171                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5172                                           "meter with jump not supported");
5173         if (!priv->mtr_en)
5174                 return rte_flow_error_set(error, ENOTSUP,
5175                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5176                                           NULL,
5177                                           "meter action not supported");
5178         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5179         if (!fm)
5180                 return rte_flow_error_set(error, EINVAL,
5181                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5182                                           "Meter not found");
5183         /* ASO meter can always be shared by different domains. */
5184         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5185             !(fm->transfer == attr->transfer ||
5186               (!fm->ingress && !attr->ingress && attr->egress) ||
5187               (!fm->egress && !attr->egress && attr->ingress)))
5188                 return rte_flow_error_set(error, EINVAL,
5189                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5190                         "Flow attributes domain is either invalid "
5191                         "or conflicts with the current "
5192                         "meter attributes");
5193         if (fm->def_policy) {
5194                 if (!((attr->transfer &&
5195                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5196                         (attr->egress &&
5197                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5198                         (attr->ingress &&
5199                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5200                         return rte_flow_error_set(error, EINVAL,
5201                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5202                                           "Flow attributes domain "
5203                                           "conflicts with the current "
5204                                           "meter domain attributes");
5205                 *def_policy = true;
5206         } else {
5207                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5208                                                 fm->policy_id, NULL);
5209                 if (!mtr_policy)
5210                         return rte_flow_error_set(error, EINVAL,
5211                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5212                                           "Invalid policy id for meter");
5213                 if (!((attr->transfer && mtr_policy->transfer) ||
5214                         (attr->egress && mtr_policy->egress) ||
5215                         (attr->ingress && mtr_policy->ingress)))
5216                         return rte_flow_error_set(error, EINVAL,
5217                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5218                                           "Flow attributes domain "
5219                                           "conflicts with the current "
5220                                           "meter domain attributes");
5221                 if (attr->transfer && mtr_policy->dev) {
5222                         /*
5223                          * When the policy has a port_id fate action,
5224                          * the flow should have the same src port as the policy.
5225                          */
5226                         struct mlx5_priv *policy_port_priv =
5227                                         mtr_policy->dev->data->dev_private;
5228                         int32_t flow_src_port = priv->representor_id;
5229
5230                         if (port_id_item) {
5231                                 const struct rte_flow_item_port_id *spec =
5232                                                         port_id_item->spec;
5233                                 struct mlx5_priv *port_priv =
5234                                         mlx5_port_to_eswitch_info(spec->id,
5235                                                                   false);
5236                                 if (!port_priv)
5237                                         return rte_flow_error_set(error,
5238                                                 rte_errno,
5239                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5240                                                 spec,
5241                                                 "Failed to get port info.");
5242                                 flow_src_port = port_priv->representor_id;
5243                         }
5244                         if (flow_src_port != policy_port_priv->representor_id)
5245                                 return rte_flow_error_set(error,
5246                                                 rte_errno,
5247                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5248                                                 NULL,
5249                                                 "Flow and meter policy "
5250                                                 "have different src port.");
5251                 } else if (mtr_policy->is_rss) {
5252                         struct mlx5_flow_meter_policy *fp;
5253                         struct mlx5_meter_policy_action_container *acg;
5254                         struct mlx5_meter_policy_action_container *acy;
5255                         const struct rte_flow_action *rss_act;
5256                         int ret;
5257
5258                         fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
5259                                                                 mtr_policy);
5260                         if (fp == NULL)
5261                                 return rte_flow_error_set(error, EINVAL,
5262                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5263                                                   "Unable to get the final "
5264                                                   "policy in the hierarchy");
5265                         acg = &fp->act_cnt[RTE_COLOR_GREEN];
5266                         acy = &fp->act_cnt[RTE_COLOR_YELLOW];
5267                         MLX5_ASSERT(acg->fate_action ==
5268                                     MLX5_FLOW_FATE_SHARED_RSS ||
5269                                     acy->fate_action ==
5270                                     MLX5_FLOW_FATE_SHARED_RSS);
5271                         if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS)
5272                                 rss_act = acg->rss;
5273                         else
5274                                 rss_act = acy->rss;
5275                         ret = mlx5_flow_validate_action_rss(rss_act,
5276                                         action_flags, dev, attr,
5277                                         item_flags, error);
5278                         if (ret)
5279                                 return ret;
5280                 }
5281                 *def_policy = false;
5282         }
5283         return 0;
5284 }
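
/*
 * Illustrative sketch (not part of the driver): a METER action referencing
 * an already-created meter object; mtr_id must resolve via
 * mlx5_flow_meter_find() as checked above. Hypothetical names under a
 * never-defined guard:
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static void
example_meter_action(void)
{
	static const struct rte_flow_action_meter meter_conf = { .mtr_id = 1 };
	static const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	(void)actions;
}
#endif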
5285
5286 /**
5287  * Validate the age action.
5288  *
5289  * @param[in] action_flags
5290  *   Holds the actions detected until now.
5291  * @param[in] action
5292  *   Pointer to the age action.
5293  * @param[in] dev
5294  *   Pointer to the Ethernet device structure.
5295  * @param[out] error
5296  *   Pointer to error structure.
5297  *
5298  * @return
5299  *   0 on success, a negative errno value otherwise and rte_errno is set.
5300  */
5301 static int
5302 flow_dv_validate_action_age(uint64_t action_flags,
5303                             const struct rte_flow_action *action,
5304                             struct rte_eth_dev *dev,
5305                             struct rte_flow_error *error)
5306 {
5307         struct mlx5_priv *priv = dev->data->dev_private;
5308         const struct rte_flow_action_age *age = action->conf;
5309
5310         if (!priv->sh->cdev->config.devx ||
5311             (priv->sh->cmng.counter_fallback && !priv->sh->aso_age_mng))
5312                 return rte_flow_error_set(error, ENOTSUP,
5313                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5314                                           NULL,
5315                                           "age action not supported");
5316         if (!(action->conf))
5317                 return rte_flow_error_set(error, EINVAL,
5318                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5319                                           "configuration cannot be null");
5320         if (!(age->timeout))
5321                 return rte_flow_error_set(error, EINVAL,
5322                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5323                                           "invalid timeout value 0");
5324         if (action_flags & MLX5_FLOW_ACTION_AGE)
5325                 return rte_flow_error_set(error, EINVAL,
5326                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5327                                           "duplicate age actions set");
5328         return 0;
5329 }
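
/*
 * Illustrative sketch (not part of the driver): an AGE action that passes
 * the checks above -- non-NULL conf with a nonzero timeout (in seconds, per
 * rte_flow), and no duplicate AGE in the same rule. Hypothetical names
 * under a never-defined guard:
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static void
example_age_action(void)
{
	static const struct rte_flow_action_age age_conf = {
		.timeout = 10, /* expire after 10 idle seconds */
	};
	static const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	(void)actions;
}
#endif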
5330
5331 /**
5332  * Validate the modify-header IPv4 DSCP actions.
5333  *
5334  * @param[in] action_flags
5335  *   Holds the actions detected until now.
5336  * @param[in] action
5337  *   Pointer to the modify action.
5338  * @param[in] item_flags
5339  *   Holds the items detected.
5340  * @param[out] error
5341  *   Pointer to error structure.
5342  *
5343  * @return
5344  *   0 on success, a negative errno value otherwise and rte_errno is set.
5345  */
5346 static int
5347 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5348                                          const struct rte_flow_action *action,
5349                                          const uint64_t item_flags,
5350                                          struct rte_flow_error *error)
5351 {
5352         int ret = 0;
5353
5354         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5355         if (!ret) {
5356                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5357                         return rte_flow_error_set(error, EINVAL,
5358                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5359                                                   NULL,
5360                                                   "no ipv4 item in pattern");
5361         }
5362         return ret;
5363 }
5364
5365 /**
5366  * Validate the modify-header IPv6 DSCP actions.
5367  *
5368  * @param[in] action_flags
5369  *   Holds the actions detected until now.
5370  * @param[in] action
5371  *   Pointer to the modify action.
5372  * @param[in] item_flags
5373  *   Holds the items detected.
5374  * @param[out] error
5375  *   Pointer to error structure.
5376  *
5377  * @return
5378  *   0 on success, a negative errno value otherwise and rte_errno is set.
5379  */
5380 static int
5381 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5382                                          const struct rte_flow_action *action,
5383                                          const uint64_t item_flags,
5384                                          struct rte_flow_error *error)
5385 {
5386         int ret = 0;
5387
5388         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5389         if (!ret) {
5390                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5391                         return rte_flow_error_set(error, EINVAL,
5392                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5393                                                   NULL,
5394                                                   "no ipv6 item in pattern");
5395         }
5396         return ret;
5397 }
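
/*
 * Illustrative sketch (not part of the driver): the DSCP validators above
 * also require the matching L3 item in the pattern -- e.g. an IPV4 item for
 * RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP. Hypothetical names under a
 * never-defined guard:
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static void
example_set_ipv4_dscp(void)
{
	static const struct rte_flow_action_set_dscp dscp_conf = {
		.dscp = 10, /* new DSCP value; only the low 6 bits are used */
	};
	static const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP,
		  .conf = &dscp_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	(void)pattern;
	(void)actions;
}
#endif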
5398
5399 int
5400 flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
5401                         struct mlx5_list_entry *entry, void *cb_ctx)
5402 {
5403         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5404         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5405         struct mlx5_flow_dv_modify_hdr_resource *resource =
5406                                   container_of(entry, typeof(*resource), entry);
5407         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5408
5409         key_len += ref->actions_num * sizeof(ref->actions[0]);
5410         return ref->actions_num != resource->actions_num ||
5411                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5412 }
5413
5414 static struct mlx5_indexed_pool *
5415 flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
5416 {
5417         struct mlx5_indexed_pool *ipool = __atomic_load_n
5418                                      (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
5419
5420         if (!ipool) {
5421                 struct mlx5_indexed_pool *expected = NULL;
5422                 struct mlx5_indexed_pool_config cfg =
5423                     (struct mlx5_indexed_pool_config) {
5424                        .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
5425                                                                    (index + 1) *
5426                                            sizeof(struct mlx5_modification_cmd),
5427                        .trunk_size = 64,
5428                        .grow_trunk = 3,
5429                        .grow_shift = 2,
5430                        .need_lock = 1,
5431                        .release_mem_en = !!sh->config.reclaim_mode,
5432                        .per_core_cache =
5433                                        sh->config.reclaim_mode ? 0 : (1 << 16),
5434                        .malloc = mlx5_malloc,
5435                        .free = mlx5_free,
5436                        .type = "mlx5_modify_action_resource",
5437                 };
5438
5439                 cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
5440                 ipool = mlx5_ipool_create(&cfg);
5441                 if (!ipool)
5442                         return NULL;
5443                 if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
5444                                                  &expected, ipool, false,
5445                                                  __ATOMIC_SEQ_CST,
5446                                                  __ATOMIC_SEQ_CST)) {
5447                         mlx5_ipool_destroy(ipool);
5448                         ipool = __atomic_load_n(&sh->mdh_ipools[index],
5449                                                 __ATOMIC_SEQ_CST);
5450                 }
5451         }
5452         return ipool;
5453 }
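
/*
 * Illustrative sketch (not part of the driver): the function above uses a
 * lock-free "create, then publish or discard" idiom -- build the pool
 * locally, try to install it with compare-and-swap, and on a lost race
 * destroy the local copy and adopt the winner's. The same idiom in generic
 * form, with hypothetical names under a never-defined guard:
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static void *
example_get_or_publish(void **slot, void *(*create)(void),
		       void (*destroy)(void *))
{
	void *obj = __atomic_load_n(slot, __ATOMIC_SEQ_CST);

	if (!obj) {
		void *expected = NULL;

		obj = create();
		if (!obj)
			return NULL;
		if (!__atomic_compare_exchange_n(slot, &expected, obj, false,
						 __ATOMIC_SEQ_CST,
						 __ATOMIC_SEQ_CST)) {
			/* Lost the race: free ours, take the winner's. */
			destroy(obj);
			obj = __atomic_load_n(slot, __ATOMIC_SEQ_CST);
		}
	}
	return obj;
}
#endif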
5454
5455 struct mlx5_list_entry *
5456 flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
5457 {
5458         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5459         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5460         struct mlx5dv_dr_domain *ns;
5461         struct mlx5_flow_dv_modify_hdr_resource *entry;
5462         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5463         struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
5464                                                           ref->actions_num - 1);
5465         int ret;
5466         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5467         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5468         uint32_t idx;
5469
5470         if (unlikely(!ipool)) {
5471                 rte_flow_error_set(ctx->error, ENOMEM,
5472                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5473                                    NULL, "cannot allocate modify ipool");
5474                 return NULL;
5475         }
5476         entry = mlx5_ipool_zmalloc(ipool, &idx);
5477         if (!entry) {
5478                 rte_flow_error_set(ctx->error, ENOMEM,
5479                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5480                                    "cannot allocate resource memory");
5481                 return NULL;
5482         }
5483         rte_memcpy(&entry->ft_type,
5484                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5485                    key_len + data_len);
5486         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5487                 ns = sh->fdb_domain;
5488         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5489                 ns = sh->tx_domain;
5490         else
5491                 ns = sh->rx_domain;
5492         ret = mlx5_flow_os_create_flow_action_modify_header
5493                                         (sh->cdev->ctx, ns, entry,
5494                                          data_len, &entry->action);
5495         if (ret) {
5496                 mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
5497                 rte_flow_error_set(ctx->error, ENOMEM,
5498                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5499                                    NULL, "cannot create modification action");
5500                 return NULL;
5501         }
5502         entry->idx = idx;
5503         return &entry->entry;
5504 }
5505
5506 struct mlx5_list_entry *
5507 flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5508                         void *cb_ctx)
5509 {
5510         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5511         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5512         struct mlx5_flow_dv_modify_hdr_resource *entry;
5513         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5514         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5515         uint32_t idx;
5516
5517         entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
5518                                   &idx);
5519         if (!entry) {
5520                 rte_flow_error_set(ctx->error, ENOMEM,
5521                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5522                                    "cannot allocate resource memory");
5523                 return NULL;
5524         }
5525         memcpy(entry, oentry, sizeof(*entry) + data_len);
5526         entry->idx = idx;
5527         return &entry->entry;
5528 }
5529
5530 void
5531 flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5532 {
5533         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5534         struct mlx5_flow_dv_modify_hdr_resource *res =
5535                 container_of(entry, typeof(*res), entry);
5536
5537         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
5538 }
5539
5540 /**
5541  * Validate the sample action.
5542  *
5543  * @param[in, out] action_flags
5544  *   Holds the actions detected until now.
5545  * @param[in] action
5546  *   Pointer to the sample action.
5547  * @param[in] dev
5548  *   Pointer to the Ethernet device structure.
5549  * @param[in] attr
5550  *   Attributes of flow that includes this action.
5551  * @param[in] item_flags
5552  *   Holds the items detected.
5553  * @param[in] rss
5554  *   Pointer to the RSS action.
5555  * @param[out] sample_rss
5556  *   Pointer to the RSS action in sample action list.
5557  * @param[out] count
5558  *   Pointer to the COUNT action in sample action list.
5559  * @param[out] fdb_mirror_limit
5560  *   Pointer to the FDB mirror limitation flag.
5561  * @param[out] error
5562  *   Pointer to error structure.
5563  *
5564  * @return
5565  *   0 on success, a negative errno value otherwise and rte_errno is set.
5566  */
5567 static int
5568 flow_dv_validate_action_sample(uint64_t *action_flags,
5569                                const struct rte_flow_action *action,
5570                                struct rte_eth_dev *dev,
5571                                const struct rte_flow_attr *attr,
5572                                uint64_t item_flags,
5573                                const struct rte_flow_action_rss *rss,
5574                                const struct rte_flow_action_rss **sample_rss,
5575                                const struct rte_flow_action_count **count,
5576                                int *fdb_mirror_limit,
5577                                struct rte_flow_error *error)
5578 {
5579         struct mlx5_priv *priv = dev->data->dev_private;
5580         struct mlx5_sh_config *dev_conf = &priv->sh->config;
5581         const struct rte_flow_action_sample *sample = action->conf;
5582         const struct rte_flow_action *act;
5583         uint64_t sub_action_flags = 0;
5584         uint16_t queue_index = 0xFFFF;
5585         int actions_n = 0;
5586         int ret;
5587
5588         if (!sample)
5589                 return rte_flow_error_set(error, EINVAL,
5590                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5591                                           "configuration cannot be NULL");
5592         if (sample->ratio == 0)
5593                 return rte_flow_error_set(error, EINVAL,
5594                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5595                                           "ratio value starts from 1");
5596         if (!priv->sh->cdev->config.devx ||
5597             (sample->ratio > 0 && !priv->sampler_en))
5598                 return rte_flow_error_set(error, ENOTSUP,
5599                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5600                                           NULL,
5601                                           "sample action not supported");
5602         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5603                 return rte_flow_error_set(error, EINVAL,
5604                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5605                                           "Multiple sample actions not "
5606                                           "supported");
5607         if (*action_flags & MLX5_FLOW_ACTION_METER)
5608                 return rte_flow_error_set(error, EINVAL,
5609                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5610                                           "wrong action order, meter should "
5611                                           "be after sample action");
5612         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5613                 return rte_flow_error_set(error, EINVAL,
5614                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5615                                           "wrong action order, jump should "
5616                                           "be after sample action");
5617         if (*action_flags & MLX5_FLOW_ACTION_CT)
5618                 return rte_flow_error_set(error, EINVAL,
5619                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5620                                           "Sample after CT not supported");
5621         act = sample->actions;
5622         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5623                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5624                         return rte_flow_error_set(error, ENOTSUP,
5625                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5626                                                   act, "too many actions");
5627                 switch (act->type) {
5628                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5629                         ret = mlx5_flow_validate_action_queue(act,
5630                                                               sub_action_flags,
5631                                                               dev,
5632                                                               attr, error);
5633                         if (ret < 0)
5634                                 return ret;
5635                         queue_index = ((const struct rte_flow_action_queue *)
5636                                                         (act->conf))->index;
5637                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5638                         ++actions_n;
5639                         break;
5640                 case RTE_FLOW_ACTION_TYPE_RSS:
5641                         *sample_rss = act->conf;
5642                         ret = mlx5_flow_validate_action_rss(act,
5643                                                             sub_action_flags,
5644                                                             dev, attr,
5645                                                             item_flags,
5646                                                             error);
5647                         if (ret < 0)
5648                                 return ret;
5649                         if (rss && *sample_rss &&
5650                             ((*sample_rss)->level != rss->level ||
5651                             (*sample_rss)->types != rss->types))
5652                                 return rte_flow_error_set(error, ENOTSUP,
5653                                         RTE_FLOW_ERROR_TYPE_ACTION,
5654                                         NULL,
5655                                         "Can't use different RSS types "
5656                                         "or levels in the same flow");
5657                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5658                                 queue_index = (*sample_rss)->queue[0];
5659                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5660                         ++actions_n;
5661                         break;
5662                 case RTE_FLOW_ACTION_TYPE_MARK:
5663                         ret = flow_dv_validate_action_mark(dev, act,
5664                                                            sub_action_flags,
5665                                                            attr, error);
5666                         if (ret < 0)
5667                                 return ret;
5668                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5669                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5670                                                 MLX5_FLOW_ACTION_MARK_EXT;
5671                         else
5672                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5673                         ++actions_n;
5674                         break;
5675                 case RTE_FLOW_ACTION_TYPE_COUNT:
5676                         ret = flow_dv_validate_action_count
5677                                 (dev, false, *action_flags | sub_action_flags,
5678                                  error);
5679                         if (ret < 0)
5680                                 return ret;
5681                         *count = act->conf;
5682                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5683                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5684                         ++actions_n;
5685                         break;
5686                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5687                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5688                         ret = flow_dv_validate_action_port_id(dev,
5689                                                               sub_action_flags,
5690                                                               act,
5691                                                               attr,
5692                                                               error);
5693                         if (ret)
5694                                 return ret;
5695                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5696                         ++actions_n;
5697                         break;
5698                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5699                         ret = flow_dv_validate_action_raw_encap_decap
5700                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5701                                  &actions_n, action, item_flags, error);
5702                         if (ret < 0)
5703                                 return ret;
5704                         ++actions_n;
5705                         break;
5706                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5707                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5708                         ret = flow_dv_validate_action_l2_encap(dev,
5709                                                                sub_action_flags,
5710                                                                act, attr,
5711                                                                error);
5712                         if (ret < 0)
5713                                 return ret;
5714                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5715                         ++actions_n;
5716                         break;
5717                 default:
5718                         return rte_flow_error_set(error, ENOTSUP,
5719                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5720                                                   NULL,
5721                                                   "action not supported inside "
5722                                                   "the sample action list");
5723                 }
5724         }
5725         if (attr->ingress && !attr->transfer) {
5726                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5727                                           MLX5_FLOW_ACTION_RSS)))
5728                         return rte_flow_error_set(error, EINVAL,
5729                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5730                                                   NULL,
5731                                                   "Ingress must have a dest "
5732                                                   "QUEUE for Sample");
5733         } else if (attr->egress && !attr->transfer) {
5734                 return rte_flow_error_set(error, ENOTSUP,
5735                                           RTE_FLOW_ERROR_TYPE_ACTION,
5736                                           NULL,
5737                                           "Sample only supports Ingress "
5738                                           "or E-Switch");
5739         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5740                 MLX5_ASSERT(attr->transfer);
5741                 if (sample->ratio > 1)
5742                         return rte_flow_error_set(error, ENOTSUP,
5743                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5744                                                   NULL,
5745                                                   "E-Switch doesn't support "
5746                                                   "any optional action "
5747                                                   "for sampling");
5748                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5749                         return rte_flow_error_set(error, ENOTSUP,
5750                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5751                                                   NULL,
5752                                                   "unsupported action QUEUE");
5753                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5754                         return rte_flow_error_set(error, ENOTSUP,
5755                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5756                                                   NULL,
5757                                                   "unsupported action RSS");
5758                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5759                         return rte_flow_error_set(error, EINVAL,
5760                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5761                                                   NULL,
5762                                                   "E-Switch must have a dest "
5763                                                   "port for mirroring");
5764                 if (!priv->sh->cdev->config.hca_attr.reg_c_preserve &&
5765                      priv->representor_id != UINT16_MAX)
5766                         *fdb_mirror_limit = 1;
5767         }
5768         /* Continue validation for Xcap actions. */
5769         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5770             (queue_index == 0xFFFF ||
5771              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5772                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5773                      MLX5_FLOW_XCAP_ACTIONS)
5774                         return rte_flow_error_set(error, ENOTSUP,
5775                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5776                                                   NULL, "encap and decap "
5777                                                   "combination isn't "
5778                                                   "supported");
5779                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5780                                                         MLX5_FLOW_ACTION_ENCAP))
5781                         return rte_flow_error_set(error, ENOTSUP,
5782                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5783                                                   NULL, "encap is not supported"
5784                                                   " for ingress traffic");
5785         }
5786         return 0;
5787 }
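
/*
 * Illustrative sketch (not part of the driver): an ingress SAMPLE action
 * that satisfies the checks above -- nonzero ratio and a QUEUE fate in the
 * sub-action list. Hypothetical names under a never-defined guard:
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static void
example_sample_action(void)
{
	static const struct rte_flow_action_queue queue = { .index = 0 };
	static const struct rte_flow_action sub_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	/* ratio N samples roughly one of every N packets; 1 samples all. */
	static const struct rte_flow_action_sample sample_conf = {
		.ratio = 2,
		.actions = sub_actions,
	};

	(void)sample_conf;
}
#endif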
5788
5789 /**
5790  * Find existing modify-header resource or create and register a new one.
5791  *
5792  * @param[in, out] dev
5793  *   Pointer to rte_eth_dev structure.
5794  * @param[in, out] resource
5795  *   Pointer to modify-header resource.
5796  * @param[in, out] dev_flow
5797  *   Pointer to the dev_flow.
5798  * @param[out] error
5799  *   pointer to error structure.
5800  *
5801  * @return
5802  *   0 on success, otherwise -errno and rte_errno is set.
5803  */
5804 static int
5805 flow_dv_modify_hdr_resource_register
5806                         (struct rte_eth_dev *dev,
5807                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5808                          struct mlx5_flow *dev_flow,
5809                          struct rte_flow_error *error)
5810 {
5811         struct mlx5_priv *priv = dev->data->dev_private;
5812         struct mlx5_dev_ctx_shared *sh = priv->sh;
5813         uint32_t key_len = sizeof(*resource) -
5814                            offsetof(typeof(*resource), ft_type) +
5815                            resource->actions_num * sizeof(resource->actions[0]);
5816         struct mlx5_list_entry *entry;
5817         struct mlx5_flow_cb_ctx ctx = {
5818                 .error = error,
5819                 .data = resource,
5820         };
5821         struct mlx5_hlist *modify_cmds;
5822         uint64_t key64;
5823
5824         modify_cmds = flow_dv_hlist_prepare(sh, &sh->modify_cmds,
5825                                 "hdr_modify",
5826                                 MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
5827                                 true, false, sh,
5828                                 flow_dv_modify_create_cb,
5829                                 flow_dv_modify_match_cb,
5830                                 flow_dv_modify_remove_cb,
5831                                 flow_dv_modify_clone_cb,
5832                                 flow_dv_modify_clone_free_cb,
5833                                 error);
5834         if (unlikely(!modify_cmds))
5835                 return -rte_errno;
5836         resource->root = !dev_flow->dv.group;
5837         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5838                                                                 resource->root))
5839                 return rte_flow_error_set(error, EOVERFLOW,
5840                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5841                                           "too many modify header items");
5842         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5843         entry = mlx5_hlist_register(modify_cmds, key64, &ctx);
5844         if (!entry)
5845                 return -rte_errno;
5846         resource = container_of(entry, typeof(*resource), entry);
5847         dev_flow->handle->dvh.modify_hdr = resource;
5848         return 0;
5849 }
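
/*
 * Illustrative note (not part of the driver): the cache key hashed above
 * spans the resource from its ft_type member through the packed
 * modification commands, so two resources are deduplicated only when the
 * table type, action count and every command byte all match.
 */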
5850
5851 /**
5852  * Get DV flow counter by index.
5853  *
5854  * @param[in] dev
5855  *   Pointer to the Ethernet device structure.
5856  * @param[in] idx
5857  *   mlx5 flow counter index in the container.
5858  * @param[out] ppool
5859  *   mlx5 flow counter pool in the container.
5860  *
5861  * @return
5862  *   Pointer to the counter, NULL otherwise.
5863  */
5864 static struct mlx5_flow_counter *
5865 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5866                            uint32_t idx,
5867                            struct mlx5_flow_counter_pool **ppool)
5868 {
5869         struct mlx5_priv *priv = dev->data->dev_private;
5870         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5871         struct mlx5_flow_counter_pool *pool;
5872
5873         /* Decrease to original index and clear shared bit. */
5874         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5875         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5876         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5877         MLX5_ASSERT(pool);
5878         if (ppool)
5879                 *ppool = pool;
5880         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5881 }
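
/*
 * Illustrative note (not part of the driver): counter indexes are 1-based
 * and may carry a shared bit. Assuming MLX5_COUNTERS_PER_POOL were 512, an
 * external index of 515 would decode (after masking the shared bit) to
 * internal index 514: pool 514 / 512 = 1, slot 514 % 512 = 2.
 */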
5882
5883 /**
5884  * Check whether the devx counter belongs to the pool.
5885  *
5886  * @param[in] pool
5887  *   Pointer to the counter pool.
5888  * @param[in] id
5889  *   The counter devx ID.
5890  *
5891  * @return
5892  *   True if counter belongs to the pool, false otherwise.
5893  */
5894 static bool
5895 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5896 {
5897         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5898                    MLX5_COUNTERS_PER_POOL;
5899
5900         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5901                 return true;
5902         return false;
5903 }
5904
5905 /**
5906  * Get a pool by devx counter ID.
5907  *
5908  * @param[in] cmng
5909  *   Pointer to the counter management.
5910  * @param[in] id
5911  *   The counter devx ID.
5912  *
5913  * @return
5914  *   The counter pool pointer if it exists, NULL otherwise.
5915  */
5916 static struct mlx5_flow_counter_pool *
5917 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5918 {
5919         uint32_t i;
5920         struct mlx5_flow_counter_pool *pool = NULL;
5921
5922         rte_spinlock_lock(&cmng->pool_update_sl);
5923         /* Check last used pool. */
5924         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5925             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5926                 pool = cmng->pools[cmng->last_pool_idx];
5927                 goto out;
5928         }
5929         /* ID out of range means no suitable pool in the container. */
5930         if (id > cmng->max_id || id < cmng->min_id)
5931                 goto out;
5932         /*
5933          * Search the container from the end: counter IDs are mostly
5934          * sequentially increasing, so the last pool is usually the
5935          * needed one.
5936          */
5937         i = cmng->n_valid;
5938         while (i--) {
5939                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5940
5941                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5942                         pool = pool_tmp;
5943                         break;
5944                 }
5945         }
5946 out:
5947         rte_spinlock_unlock(&cmng->pool_update_sl);
5948         return pool;
5949 }
5950
5951 /**
5952  * Resize a counter container.
5953  *
5954  * @param[in] dev
5955  *   Pointer to the Ethernet device structure.
5956  *
5957  * @return
5958  *   0 on success, otherwise negative errno value and rte_errno is set.
5959  */
5960 static int
5961 flow_dv_container_resize(struct rte_eth_dev *dev)
5962 {
5963         struct mlx5_priv *priv = dev->data->dev_private;
5964         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5965         void *old_pools = cmng->pools;
5966         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5967         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5968         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5969
5970         if (!pools) {
5971                 rte_errno = ENOMEM;
5972                 return -ENOMEM;
5973         }
5974         if (old_pools)
5975                 memcpy(pools, old_pools, cmng->n *
5976                                        sizeof(struct mlx5_flow_counter_pool *));
5977         cmng->n = resize;
5978         cmng->pools = pools;
5979         if (old_pools)
5980                 mlx5_free(old_pools);
5981         return 0;
5982 }
5983
5984 /**
5985  * Query a devx flow counter.
5986  *
5987  * @param[in] dev
5988  *   Pointer to the Ethernet device structure.
5989  * @param[in] counter
5990  *   Index to the flow counter.
5991  * @param[out] pkts
5992  *   The statistics value of packets.
5993  * @param[out] bytes
5994  *   The statistics value of bytes.
5995  *
5996  * @return
5997  *   0 on success, otherwise a negative errno value and rte_errno is set.
5998  */
5999 static inline int
6000 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
6001                      uint64_t *bytes)
6002 {
6003         struct mlx5_priv *priv = dev->data->dev_private;
6004         struct mlx5_flow_counter_pool *pool = NULL;
6005         struct mlx5_flow_counter *cnt;
6006         int offset;
6007
6008         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6009         MLX5_ASSERT(pool);
6010         if (priv->sh->cmng.counter_fallback)
6011                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
6012                                         0, pkts, bytes, 0, NULL, NULL, 0);
6013         rte_spinlock_lock(&pool->sl);
6014         if (!pool->raw) {
6015                 *pkts = 0;
6016                 *bytes = 0;
6017         } else {
6018                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
6019                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
6020                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
6021         }
6022         rte_spinlock_unlock(&pool->sl);
6023         return 0;
6024 }
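
/*
 * Illustrative application-side view (hypothetical helper, public rte_flow
 * API only): the pkts/bytes values produced above ultimately back
 * rte_flow_query() on a COUNT action.
 */
static inline int
example_query_flow_count(uint16_t port_id, struct rte_flow *flow,
                         uint64_t *pkts, uint64_t *bytes)
{
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_query_count qc = { .reset = 0 };
        struct rte_flow_error err;

        if (rte_flow_query(port_id, flow, actions, &qc, &err))
                return -rte_errno;
        *pkts = qc.hits;
        *bytes = qc.bytes;
        return 0;
}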
6025
6026 /**
6027  * Create and initialize a new counter pool.
6028  *
6029  * @param[in] dev
6030  *   Pointer to the Ethernet device structure.
6031  * @param[in] dcs
6032  *   The devX counter handle.
6033  * @param[in] age
6034  *   Whether the pool is for counters that were allocated for aging.
6037  *
6038  * @return
6039  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
6040  */
6041 static struct mlx5_flow_counter_pool *
6042 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
6043                     uint32_t age)
6044 {
6045         struct mlx5_priv *priv = dev->data->dev_private;
6046         struct mlx5_flow_counter_pool *pool;
6047         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6048         bool fallback = priv->sh->cmng.counter_fallback;
6049         uint32_t size = sizeof(*pool);
6050
6051         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
6052         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
6053         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
6054         if (!pool) {
6055                 rte_errno = ENOMEM;
6056                 return NULL;
6057         }
6058         pool->raw = NULL;
6059         pool->is_aged = !!age;
6060         pool->query_gen = 0;
6061         pool->min_dcs = dcs;
6062         rte_spinlock_init(&pool->sl);
6063         rte_spinlock_init(&pool->csl);
6064         TAILQ_INIT(&pool->counters[0]);
6065         TAILQ_INIT(&pool->counters[1]);
6066         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
6067         rte_spinlock_lock(&cmng->pool_update_sl);
6068         pool->index = cmng->n_valid;
6069         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
6070                 mlx5_free(pool);
6071                 rte_spinlock_unlock(&cmng->pool_update_sl);
6072                 return NULL;
6073         }
6074         cmng->pools[pool->index] = pool;
6075         cmng->n_valid++;
6076         if (unlikely(fallback)) {
6077                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
6078
6079                 if (base < cmng->min_id)
6080                         cmng->min_id = base;
6081                 if (base > cmng->max_id)
6082                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
6083                 cmng->last_pool_idx = pool->index;
6084         }
6085         rte_spinlock_unlock(&cmng->pool_update_sl);
6086         return pool;
6087 }
6088
6089 /**
6090  * Prepare a new counter and/or a new counter pool.
6091  *
6092  * @param[in] dev
6093  *   Pointer to the Ethernet device structure.
6094  * @param[out] cnt_free
6095  *   Where to put the pointer of a new counter.
6096  * @param[in] age
6097  *   Whether the pool is for counters that were allocated for aging.
6098  *
6099  * @return
6100  *   The counter pool pointer and @p cnt_free is set on success,
6101  *   NULL otherwise and rte_errno is set.
6102  */
6103 static struct mlx5_flow_counter_pool *
6104 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
6105                              struct mlx5_flow_counter **cnt_free,
6106                              uint32_t age)
6107 {
6108         struct mlx5_priv *priv = dev->data->dev_private;
6109         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6110         struct mlx5_flow_counter_pool *pool;
6111         struct mlx5_counters tmp_tq;
6112         struct mlx5_devx_obj *dcs = NULL;
6113         struct mlx5_flow_counter *cnt;
6114         enum mlx5_counter_type cnt_type =
6115                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6116         bool fallback = priv->sh->cmng.counter_fallback;
6117         uint32_t i;
6118
6119         if (fallback) {
6120                 /* bulk_bitmap must be 0 for single counter allocation. */
6121                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0);
6122                 if (!dcs)
6123                         return NULL;
6124                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
6125                 if (!pool) {
6126                         pool = flow_dv_pool_create(dev, dcs, age);
6127                         if (!pool) {
6128                                 mlx5_devx_cmd_destroy(dcs);
6129                                 return NULL;
6130                         }
6131                 }
6132                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
6133                 cnt = MLX5_POOL_GET_CNT(pool, i);
6134                 cnt->pool = pool;
6135                 cnt->dcs_when_free = dcs;
6136                 *cnt_free = cnt;
6137                 return pool;
6138         }
6139         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
6140         if (!dcs) {
6141                 rte_errno = ENODATA;
6142                 return NULL;
6143         }
6144         pool = flow_dv_pool_create(dev, dcs, age);
6145         if (!pool) {
6146                 mlx5_devx_cmd_destroy(dcs);
6147                 return NULL;
6148         }
6149         TAILQ_INIT(&tmp_tq);
6150         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
6151                 cnt = MLX5_POOL_GET_CNT(pool, i);
6152                 cnt->pool = pool;
6153                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
6154         }
6155         rte_spinlock_lock(&cmng->csl[cnt_type]);
6156         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
6157         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6158         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
6159         (*cnt_free)->pool = pool;
6160         return pool;
6161 }
6162
6163 /**
6164  * Allocate a flow counter.
6165  *
6166  * @param[in] dev
6167  *   Pointer to the Ethernet device structure.
6168  * @param[in] age
6169  *   Whether the counter was allocated for aging.
6170  *
6171  * @return
6172  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6173  */
6174 static uint32_t
6175 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
6176 {
6177         struct mlx5_priv *priv = dev->data->dev_private;
6178         struct mlx5_flow_counter_pool *pool = NULL;
6179         struct mlx5_flow_counter *cnt_free = NULL;
6180         bool fallback = priv->sh->cmng.counter_fallback;
6181         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6182         enum mlx5_counter_type cnt_type =
6183                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6184         uint32_t cnt_idx;
6185
6186         if (!priv->sh->cdev->config.devx) {
6187                 rte_errno = ENOTSUP;
6188                 return 0;
6189         }
6190         /* Get free counters from container. */
6191         rte_spinlock_lock(&cmng->csl[cnt_type]);
6192         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
6193         if (cnt_free)
6194                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
6195         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6196         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
6197                 goto err;
6198         pool = cnt_free->pool;
6199         if (fallback)
6200                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
6201         /* Create a DV counter action only on first-time usage. */
6202         if (!cnt_free->action) {
6203                 uint16_t offset;
6204                 struct mlx5_devx_obj *dcs;
6205                 int ret;
6206
6207                 if (!fallback) {
6208                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
6209                         dcs = pool->min_dcs;
6210                 } else {
6211                         offset = 0;
6212                         dcs = cnt_free->dcs_when_free;
6213                 }
6214                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
6215                                                             &cnt_free->action);
6216                 if (ret) {
6217                         rte_errno = errno;
6218                         goto err;
6219                 }
6220         }
6221         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
6222                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
6223         /* Update the counter reset values. */
6224         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
6225                                  &cnt_free->bytes))
6226                 goto err;
6227         if (!fallback && !priv->sh->cmng.query_thread_on)
6228                 /* Start the asynchronous batch query by the host thread. */
6229                 mlx5_set_query_alarm(priv->sh);
6230         /*
6231          * When the count action isn't shared (by ID), the shared_info field
6232          * is used for the indirect action API's refcnt.
6233          * When the counter action is shared neither by ID nor by the
6234          * indirect action API, the refcnt must be 1.
6235          */
6236         cnt_free->shared_info.refcnt = 1;
6237         return cnt_idx;
6238 err:
6239         if (cnt_free) {
6240                 cnt_free->pool = pool;
6241                 if (fallback)
6242                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
6243                 rte_spinlock_lock(&cmng->csl[cnt_type]);
6244                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
6245                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6246         }
6247         return 0;
6248 }
6249
6250 /**
6251  * Get age param from counter index.
6252  *
6253  * @param[in] dev
6254  *   Pointer to the Ethernet device structure.
6255  * @param[in] counter
6256  *   Index to the counter handle.
6257  *
6258  * @return
6259  *   The aging parameter specified for the counter index.
6260  */
6261 static struct mlx5_age_param *
6262 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6263                                 uint32_t counter)
6264 {
6265         struct mlx5_flow_counter *cnt;
6266         struct mlx5_flow_counter_pool *pool = NULL;
6267
6268         flow_dv_counter_get_by_idx(dev, counter, &pool);
6269         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6270         cnt = MLX5_POOL_GET_CNT(pool, counter);
6271         return MLX5_CNT_TO_AGE(cnt);
6272 }
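
/*
 * Illustrative sketch (helper name hypothetical): counter indices handed
 * around in this file are 1-based compositions of pool index and in-pool
 * offset, built by MLX5_MAKE_CNT_IDX(). Ignoring the shared-counter bit
 * handling, the decode mirrors the "(counter - 1) % MLX5_COUNTERS_PER_POOL"
 * arithmetic used above:
 */
static inline void
example_cnt_idx_split(uint32_t cnt_idx, uint32_t *pool_idx, uint32_t *offset)
{
        *pool_idx = (cnt_idx - 1) / MLX5_COUNTERS_PER_POOL;
        *offset = (cnt_idx - 1) % MLX5_COUNTERS_PER_POOL;
}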
6273
6274 /**
6275  * Remove a flow counter from aged counter list.
6276  *
6277  * @param[in] dev
6278  *   Pointer to the Ethernet device structure.
6279  * @param[in] counter
6280  *   Index to the counter handle.
6281  * @param[in] cnt
6282  *   Pointer to the counter handle.
6283  */
6284 static void
6285 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6286                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6287 {
6288         struct mlx5_age_info *age_info;
6289         struct mlx5_age_param *age_param;
6290         struct mlx5_priv *priv = dev->data->dev_private;
6291         uint16_t expected = AGE_CANDIDATE;
6292
6293         age_info = GET_PORT_AGE_INFO(priv);
6294         age_param = flow_dv_counter_idx_get_age(dev, counter);
6295         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6296                                          AGE_FREE, false, __ATOMIC_RELAXED,
6297                                          __ATOMIC_RELAXED)) {
6298                 /*
6299                  * We need the lock even if the age timed out,
6300                  * since the counter may still be in process.
6301                  */
6302                 rte_spinlock_lock(&age_info->aged_sl);
6303                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6304                 rte_spinlock_unlock(&age_info->aged_sl);
6305                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6306         }
6307 }
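
/*
 * Illustrative sketch: the compare-exchange above is a one-shot state
 * transition. A counter still in AGE_CANDIDATE is simply marked AGE_FREE;
 * any other state means it already sits on the aged list and must be
 * unlinked under the lock. Generic form of the transition helper
 * (name hypothetical):
 */
static inline bool
example_try_transition(uint16_t *state, uint16_t from, uint16_t to)
{
        uint16_t expected = from;

        return __atomic_compare_exchange_n(state, &expected, to, false,
                                           __ATOMIC_RELAXED,
                                           __ATOMIC_RELAXED);
}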
6308
6309 /**
6310  * Release a flow counter.
6311  *
6312  * @param[in] dev
6313  *   Pointer to the Ethernet device structure.
6314  * @param[in] counter
6315  *   Index to the counter handle.
6316  */
6317 static void
6318 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6319 {
6320         struct mlx5_priv *priv = dev->data->dev_private;
6321         struct mlx5_flow_counter_pool *pool = NULL;
6322         struct mlx5_flow_counter *cnt;
6323         enum mlx5_counter_type cnt_type;
6324
6325         if (!counter)
6326                 return;
6327         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6328         MLX5_ASSERT(pool);
6329         if (pool->is_aged) {
6330                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6331         } else {
6332                 /*
6333                  * If the counter action is shared by the indirect action API,
6334                  * the atomic function decrements its reference counter.
6335                  * If the action is still referenced after the decrement, the
6336                  * function returns here and does not release it.
6337                  * When the counter action is not shared by the indirect
6338                  * action API, the refcnt is 1 before the decrement, so the
6339                  * condition fails and the function does not return here.
6340                  */
6341                 if (__atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6342                                        __ATOMIC_RELAXED))
6343                         return;
6344         }
6345         cnt->pool = pool;
6346         /*
6347          * Put the counter back to a list to be updated in non-fallback mode.
6348          * Two lists are used alternately: while one is being queried, freed
6349          * counters are added to the other, selected by the pool query_gen
6350          * value. After a query finishes, its counters are moved to the
6351          * global container counter list. The active list switches when a
6352          * query starts; the query callback and this release function thus
6353          * always operate on different lists and need no common lock.
6354          */
6355         if (!priv->sh->cmng.counter_fallback) {
6356                 rte_spinlock_lock(&pool->csl);
6357                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6358                 rte_spinlock_unlock(&pool->csl);
6359         } else {
6360                 cnt->dcs_when_free = cnt->dcs_when_active;
6361                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6362                                            MLX5_COUNTER_TYPE_ORIGIN;
6363                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6364                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6365                                   cnt, next);
6366                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6367         }
6368 }
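
/*
 * Illustrative sketch of the two-list scheme described in the comment
 * above (struct and names hypothetical): release inserts into the list
 * selected by query_gen while the query side drains the other one.
 */
struct example_cnt_pool {
        struct mlx5_counters free_lists[2]; /* TAILQ heads, as in the pool. */
        uint32_t query_gen; /* 0 or 1, flipped when a query cycle starts. */
        rte_spinlock_t csl; /* Serializes concurrent releases. */
};

static inline void
example_release_cnt(struct example_cnt_pool *p, struct mlx5_flow_counter *cnt)
{
        rte_spinlock_lock(&p->csl);
        TAILQ_INSERT_TAIL(&p->free_lists[p->query_gen & 1], cnt, next);
        rte_spinlock_unlock(&p->csl);
}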
6369
6370 /**
6371  * Resize a meter ID container.
6372  *
6373  * @param[in] dev
6374  *   Pointer to the Ethernet device structure.
6375  *
6376  * @return
6377  *   0 on success, otherwise negative errno value and rte_errno is set.
6378  */
6379 static int
6380 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6381 {
6382         struct mlx5_priv *priv = dev->data->dev_private;
6383         struct mlx5_aso_mtr_pools_mng *pools_mng =
6384                                 &priv->sh->mtrmng->pools_mng;
6385         void *old_pools = pools_mng->pools;
6386         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6387         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6388         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6389
6390         if (!pools) {
6391                 rte_errno = ENOMEM;
6392                 return -ENOMEM;
6393         }
6394         if (!pools_mng->n &&
6395             mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6396                 mlx5_free(pools);
6397                 return -ENOMEM;
6398         }
6399         if (old_pools)
6400                 memcpy(pools, old_pools, pools_mng->n *
6401                                        sizeof(struct mlx5_aso_mtr_pool *));
6402         pools_mng->n = resize;
6403         pools_mng->pools = pools;
6404         if (old_pools)
6405                 mlx5_free(old_pools);
6406         return 0;
6407 }
6408
6409 /**
6410  * Prepare a new meter and/or a new meter pool.
6411  *
6412  * @param[in] dev
6413  *   Pointer to the Ethernet device structure.
6414  * @param[out] mtr_free
6415  *   Where to put the pointer of a new meter.
6416  *
6417  * @return
6418  *   The meter pool pointer and @p mtr_free is set on success,
6419  *   NULL otherwise and rte_errno is set.
6420  */
6421 static struct mlx5_aso_mtr_pool *
6422 flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
6423 {
6424         struct mlx5_priv *priv = dev->data->dev_private;
6425         struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
6426         struct mlx5_aso_mtr_pool *pool = NULL;
6427         struct mlx5_devx_obj *dcs = NULL;
6428         uint32_t i;
6429         uint32_t log_obj_size;
6430
6431         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6432         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
6433                                                       priv->sh->cdev->pdn,
6434                                                       log_obj_size);
6435         if (!dcs) {
6436                 rte_errno = ENODATA;
6437                 return NULL;
6438         }
6439         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6440         if (!pool) {
6441                 rte_errno = ENOMEM;
6442                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6443                 return NULL;
6444         }
6445         pool->devx_obj = dcs;
6446         rte_rwlock_write_lock(&pools_mng->resize_mtrwl);
6447         pool->index = pools_mng->n_valid;
6448         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6449                 mlx5_free(pool);
6450                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6451                 rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6452                 return NULL;
6453         }
6454         pools_mng->pools[pool->index] = pool;
6455         pools_mng->n_valid++;
6456         rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6457         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6458                 pool->mtrs[i].offset = i;
6459                 LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
6460         }
6461         pool->mtrs[0].offset = 0;
6462         *mtr_free = &pool->mtrs[0];
6463         return pool;
6464 }
6465
6466 /**
6467  * Release a flow meter into pool.
6468  *
6469  * @param[in] dev
6470  *   Pointer to the Ethernet device structure.
6471  * @param[in] mtr_idx
6472  *   Index to the ASO flow meter.
6473  */
6474 static void
6475 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6476 {
6477         struct mlx5_priv *priv = dev->data->dev_private;
6478         struct mlx5_aso_mtr_pools_mng *pools_mng =
6479                                 &priv->sh->mtrmng->pools_mng;
6480         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6481
6482         MLX5_ASSERT(aso_mtr);
6483         rte_spinlock_lock(&pools_mng->mtrsl);
6484         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6485         aso_mtr->state = ASO_METER_FREE;
6486         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6487         rte_spinlock_unlock(&pools_mng->mtrsl);
6488 }
6489
6490 /**
6491  * Allocate an ASO flow meter.
6492  *
6493  * @param[in] dev
6494  *   Pointer to the Ethernet device structure.
6495  *
6496  * @return
6497  *   Index to the ASO flow meter on success, 0 otherwise and rte_errno is set.
6498  */
6499 static uint32_t
6500 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6501 {
6502         struct mlx5_priv *priv = dev->data->dev_private;
6503         struct mlx5_aso_mtr *mtr_free = NULL;
6504         struct mlx5_aso_mtr_pools_mng *pools_mng =
6505                                 &priv->sh->mtrmng->pools_mng;
6506         struct mlx5_aso_mtr_pool *pool;
6507         uint32_t mtr_idx = 0;
6508
6509         if (!priv->sh->cdev->config.devx) {
6510                 rte_errno = ENOTSUP;
6511                 return 0;
6512         }
6513         /* Get a free meter from the management list, if any. */
6515         rte_spinlock_lock(&pools_mng->mtrsl);
6516         mtr_free = LIST_FIRST(&pools_mng->meters);
6517         if (mtr_free)
6518                 LIST_REMOVE(mtr_free, next);
6519         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6520                 rte_spinlock_unlock(&pools_mng->mtrsl);
6521                 return 0;
6522         }
6523         mtr_free->state = ASO_METER_WAIT;
6524         rte_spinlock_unlock(&pools_mng->mtrsl);
6525         pool = container_of(mtr_free,
6526                         struct mlx5_aso_mtr_pool,
6527                         mtrs[mtr_free->offset]);
6528         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6529         if (!mtr_free->fm.meter_action) {
6530 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6531                 struct rte_flow_error error;
6532                 uint8_t reg_id;
6533
6534                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6535                 mtr_free->fm.meter_action =
6536                         mlx5_glue->dv_create_flow_action_aso
6537                                                 (priv->sh->rx_domain,
6538                                                  pool->devx_obj->obj,
6539                                                  mtr_free->offset,
6540                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6541                                                  reg_id - REG_C_0);
6542 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6543                 if (!mtr_free->fm.meter_action) {
6544                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6545                         return 0;
6546                 }
6547         }
6548         return mtr_idx;
6549 }
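
/*
 * Illustrative application-side usage (hypothetical helper, public rte_mtr
 * API only): meters created this way are what the ASO allocator above
 * ultimately serves. Profile/meter IDs and rates are arbitrary example
 * values.
 */
static inline int
example_create_meter(uint16_t port_id)
{
        struct rte_mtr_error mtr_err;
        struct rte_mtr_meter_profile profile = {
                .alg = RTE_MTR_SRTCM_RFC2697,
                .srtcm_rfc2697 = {
                        .cir = 1000000, /* Committed rate, bytes/sec. */
                        .cbs = 65536,   /* Committed burst, bytes. */
                        .ebs = 65536,   /* Excess burst, bytes. */
                },
        };
        struct rte_mtr_params params = {
                .meter_profile_id = 1,
                .meter_enable = 1,
        };
        int ret;

        ret = rte_mtr_meter_profile_add(port_id, 1, &profile, &mtr_err);
        if (ret)
                return ret;
        return rte_mtr_create(port_id, 1, &params, 0, &mtr_err);
}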
6550
6551 /**
6552  * Verify that the @p attributes will be correctly understood by the NIC
6553  * and are valid for the current device configuration.
6554  *
6555  * @param[in] dev
6556  *   Pointer to dev struct.
6557  * @param[in] attributes
6558  *   Pointer to flow attributes.
6559  * @param[in] grp_info
6560  *   Pointer to the flow group translation info.
6561  * @param[out] error
6562  *   Pointer to error structure.
6563  *
6564  * @return
6565  *   - 0 on success and non-root table.
6566  *   - 1 on success and root table.
6567  *   - a negative errno value otherwise and rte_errno is set.
6568  */
6569 static int
6570 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6571                             const struct mlx5_flow_tunnel *tunnel,
6572                             const struct rte_flow_attr *attributes,
6573                             const struct flow_grp_info *grp_info,
6574                             struct rte_flow_error *error)
6575 {
6576         struct mlx5_priv *priv = dev->data->dev_private;
6577         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6578         int ret = 0;
6579
6580 #ifndef HAVE_MLX5DV_DR
6581         RTE_SET_USED(tunnel);
6582         RTE_SET_USED(grp_info);
6583         if (attributes->group)
6584                 return rte_flow_error_set(error, ENOTSUP,
6585                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6586                                           NULL,
6587                                           "groups are not supported");
6588 #else
6589         uint32_t table = 0;
6590
6591         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6592                                        grp_info, error);
6593         if (ret)
6594                 return ret;
6595         if (!table)
6596                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6597 #endif
6598         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6599             attributes->priority > lowest_priority)
6600                 return rte_flow_error_set(error, ENOTSUP,
6601                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6602                                           NULL,
6603                                           "priority out of range");
6604         if (attributes->transfer) {
6605                 if (!priv->sh->config.dv_esw_en)
6606                         return rte_flow_error_set
6607                                 (error, ENOTSUP,
6608                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6609                                  "E-Switch dr is not supported");
6610                 if (attributes->egress)
6611                         return rte_flow_error_set
6612                                 (error, ENOTSUP,
6613                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6614                                  "egress is not supported");
6615         }
6616         if (!(attributes->egress ^ attributes->ingress))
6617                 return rte_flow_error_set(error, ENOTSUP,
6618                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6619                                           "must specify exactly one of "
6620                                           "ingress or egress");
6621         return ret;
6622 }
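
/*
 * Illustrative example of attributes accepted by the checks above: a
 * unidirectional ingress rule in a non-root group. Setting both or
 * neither of ingress/egress, or egress together with transfer, is
 * rejected.
 */
static const struct rte_flow_attr example_attr = {
        .group = 1,    /* Non-root table (typically): validation returns 0. */
        .priority = 0, /* Must not exceed the lowest supported priority. */
        .ingress = 1,  /* Exactly one of ingress/egress must be set. */
};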
6623
6624 static int
6625 validate_integrity_bits(const struct rte_flow_item_integrity *mask,
6626                         int64_t pattern_flags, uint64_t l3_flags,
6627                         uint64_t l4_flags, uint64_t ip4_flag,
6628                         struct rte_flow_error *error)
6629 {
6630         if (mask->l3_ok && !(pattern_flags & l3_flags))
6631                 return rte_flow_error_set(error, EINVAL,
6632                                           RTE_FLOW_ERROR_TYPE_ITEM,
6633                                           NULL, "missing L3 protocol");
6634
6635         if (mask->ipv4_csum_ok && !(pattern_flags & ip4_flag))
6636                 return rte_flow_error_set(error, EINVAL,
6637                                           RTE_FLOW_ERROR_TYPE_ITEM,
6638                                           NULL, "missing IPv4 protocol");
6639
6640         if ((mask->l4_ok || mask->l4_csum_ok) && !(pattern_flags & l4_flags))
6641                 return rte_flow_error_set(error, EINVAL,
6642                                           RTE_FLOW_ERROR_TYPE_ITEM,
6643                                           NULL, "missing L4 protocol");
6644
6645         return 0;
6646 }
6647
6648 static int
6649 flow_dv_validate_item_integrity_post(const struct
6650                                      rte_flow_item *integrity_items[2],
6651                                      int64_t pattern_flags,
6652                                      struct rte_flow_error *error)
6653 {
6654         const struct rte_flow_item_integrity *mask;
6655         int ret;
6656
6657         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
6658                 mask = (typeof(mask))integrity_items[0]->mask;
6659                 ret = validate_integrity_bits(mask, pattern_flags,
6660                                               MLX5_FLOW_LAYER_OUTER_L3,
6661                                               MLX5_FLOW_LAYER_OUTER_L4,
6662                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4,
6663                                               error);
6664                 if (ret)
6665                         return ret;
6666         }
6667         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
6668                 mask = (typeof(mask))integrity_items[1]->mask;
6669                 ret = validate_integrity_bits(mask, pattern_flags,
6670                                               MLX5_FLOW_LAYER_INNER_L3,
6671                                               MLX5_FLOW_LAYER_INNER_L4,
6672                                               MLX5_FLOW_LAYER_INNER_L3_IPV4,
6673                                               error);
6674                 if (ret)
6675                         return ret;
6676         }
6677         return 0;
6678 }
6679
6680 static int
6681 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6682                                 const struct rte_flow_item *integrity_item,
6683                                 uint64_t pattern_flags, uint64_t *last_item,
6684                                 const struct rte_flow_item *integrity_items[2],
6685                                 struct rte_flow_error *error)
6686 {
6687         struct mlx5_priv *priv = dev->data->dev_private;
6688         const struct rte_flow_item_integrity *mask = (typeof(mask))
6689                                                      integrity_item->mask;
6690         const struct rte_flow_item_integrity *spec = (typeof(spec))
6691                                                      integrity_item->spec;
6692
6693         if (!priv->sh->cdev->config.hca_attr.pkt_integrity_match)
6694                 return rte_flow_error_set(error, ENOTSUP,
6695                                           RTE_FLOW_ERROR_TYPE_ITEM,
6696                                           integrity_item,
6697                                           "packet integrity integrity_item not supported");
6698         if (!spec)
6699                 return rte_flow_error_set(error, ENOTSUP,
6700                                           RTE_FLOW_ERROR_TYPE_ITEM,
6701                                           integrity_item,
6702                                           "no spec for integrity item");
6703         if (!mask)
6704                 mask = &rte_flow_item_integrity_mask;
6705         if (!mlx5_validate_integrity_item(mask))
6706                 return rte_flow_error_set(error, ENOTSUP,
6707                                           RTE_FLOW_ERROR_TYPE_ITEM,
6708                                           integrity_item,
6709                                           "unsupported integrity filter");
6710         if (spec->level > 1) {
6711                 if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY)
6712                         return rte_flow_error_set
6713                                 (error, ENOTSUP,
6714                                  RTE_FLOW_ERROR_TYPE_ITEM,
6715                                  NULL, "multiple inner integrity items not supported");
6716                 integrity_items[1] = integrity_item;
6717                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
6718         } else {
6719                 if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY)
6720                         return rte_flow_error_set
6721                                 (error, ENOTSUP,
6722                                  RTE_FLOW_ERROR_TYPE_ITEM,
6723                                  NULL, "multiple outer integrity items not supported");
6724                 integrity_items[0] = integrity_item;
6725                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
6726         }
6727         return 0;
6728 }
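
/*
 * Illustrative application-side pattern (public rte_flow API): matching
 * outer packets whose L3 header and IPv4 checksum were verified as OK.
 * The checks above additionally require the pattern to contain the
 * corresponding L3 item, e.g. ETH / IPV4 / INTEGRITY / END.
 */
static const struct rte_flow_item_integrity example_integ_spec = {
        .level = 0, /* 0/1: outer packet; > 1: inner (tunneled) packet. */
        .l3_ok = 1,
        .ipv4_csum_ok = 1,
};
static const struct rte_flow_item_integrity example_integ_mask = {
        .l3_ok = 1,
        .ipv4_csum_ok = 1,
};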
6729
6730 static int
6731 flow_dv_validate_item_flex(struct rte_eth_dev *dev,
6732                            const struct rte_flow_item *item,
6733                            uint64_t item_flags,
6734                            uint64_t *last_item,
6735                            bool is_inner,
6736                            struct rte_flow_error *error)
6737 {
6738         const struct rte_flow_item_flex *flow_spec = item->spec;
6739         const struct rte_flow_item_flex *flow_mask = item->mask;
6740         struct mlx5_flex_item *flex;
6741
6742         if (!flow_spec)
6743                 return rte_flow_error_set(error, EINVAL,
6744                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6745                                           "flex flow item spec cannot be NULL");
6746         if (!flow_mask)
6747                 return rte_flow_error_set(error, EINVAL,
6748                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6749                                           "flex flow item mask cannot be NULL");
6750         if (item->last)
6751                 return rte_flow_error_set(error, ENOTSUP,
6752                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6753                                           "flex flow item last not supported");
6754         if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
6755                 return rte_flow_error_set(error, EINVAL,
6756                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6757                                           "invalid flex flow item handle");
6758         flex = (struct mlx5_flex_item *)flow_spec->handle;
6759         switch (flex->tunnel_mode) {
6760         case FLEX_TUNNEL_MODE_SINGLE:
6761                 if (item_flags &
6762                     (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
6763                         return rte_flow_error_set(error, EINVAL,
6764                                            RTE_FLOW_ERROR_TYPE_ITEM,
6765                                            NULL, "multiple flex items not supported");
6766                 break;
6767         case FLEX_TUNNEL_MODE_OUTER:
6768                 if (is_inner)
6769                         return rte_flow_error_set(error, EINVAL,
6770                                            RTE_FLOW_ERROR_TYPE_ITEM,
6771                                            NULL, "inner flex item was not configured");
6772                 if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
6773                         return rte_flow_error_set(error, ENOTSUP,
6774                                            RTE_FLOW_ERROR_TYPE_ITEM,
6775                                            NULL, "multiple flex items not supported");
6776                 break;
6777         case FLEX_TUNNEL_MODE_INNER:
6778                 if (!is_inner)
6779                         return rte_flow_error_set(error, EINVAL,
6780                                            RTE_FLOW_ERROR_TYPE_ITEM,
6781                                            NULL, "outer flex item was not configured");
6782                 if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
6783                         return rte_flow_error_set(error, EINVAL,
6784                                            RTE_FLOW_ERROR_TYPE_ITEM,
6785                                            NULL, "multiple flex items not supported");
6786                 break;
6787         case FLEX_TUNNEL_MODE_MULTI:
6788                 if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
6789                     (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)))
6790                         return rte_flow_error_set(error, EINVAL,
6791                                            RTE_FLOW_ERROR_TYPE_ITEM,
6792                                            NULL, "multiple flex items not supported");
6793                 break;
6794         case FLEX_TUNNEL_MODE_TUNNEL:
6795                 if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
6796                         return rte_flow_error_set(error, EINVAL,
6797                                            RTE_FLOW_ERROR_TYPE_ITEM,
6798                                            NULL, "multiple flex tunnel items not supported");
6799                 break;
6800         default:
6801                 return rte_flow_error_set(error, EINVAL,
6802                                    RTE_FLOW_ERROR_TYPE_ITEM,
6803                                    NULL, "invalid flex item configuration");
6804         }
6806         *last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
6807                      MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
6808                      MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
6809         return 0;
6810 }
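
/*
 * Illustrative application-side pattern (hypothetical helper and values,
 * public rte_flow API): a flex item references a parser handle created
 * earlier with rte_flow_flex_item_create(); both spec and mask must be
 * present, as enforced above, and 'last' is not supported.
 */
static inline void
example_fill_flex_item(struct rte_flow_item *item,
                       struct rte_flow_item_flex_handle *handle,
                       struct rte_flow_item_flex *spec,
                       struct rte_flow_item_flex *mask)
{
        static const uint8_t sample[2] = { 0x12, 0x34 };
        static const uint8_t sample_mask[2] = { 0xff, 0xff };

        spec->handle = handle;
        spec->length = sizeof(sample);
        spec->pattern = sample;
        mask->handle = handle;
        mask->length = sizeof(sample_mask);
        mask->pattern = sample_mask;
        item->type = RTE_FLOW_ITEM_TYPE_FLEX;
        item->spec = spec;
        item->mask = mask;
        item->last = NULL; /* 'last' is rejected for flex items. */
}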
6811
6812 /**
6813  * Internal validation function. For validating both actions and items.
6814  *
6815  * @param[in] dev
6816  *   Pointer to the rte_eth_dev structure.
6817  * @param[in] attr
6818  *   Pointer to the flow attributes.
6819  * @param[in] items
6820  *   Pointer to the list of items.
6821  * @param[in] actions
6822  *   Pointer to the list of actions.
6823  * @param[in] external
6824  *   This flow rule is created by a request external to the PMD.
6825  * @param[in] hairpin
6826  *   Number of hairpin TX actions, 0 means classic flow.
6827  * @param[out] error
6828  *   Pointer to the error structure.
6829  *
6830  * @return
6831  *   0 on success, a negative errno value otherwise and rte_errno is set.
6832  */
6833 static int
6834 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6835                  const struct rte_flow_item items[],
6836                  const struct rte_flow_action actions[],
6837                  bool external, int hairpin, struct rte_flow_error *error)
6838 {
6839         int ret;
6840         uint64_t action_flags = 0;
6841         uint64_t item_flags = 0;
6842         uint64_t last_item = 0;
6843         uint8_t next_protocol = 0xff;
6844         uint16_t ether_type = 0;
6845         int actions_n = 0;
6846         uint8_t item_ipv6_proto = 0;
6847         int fdb_mirror_limit = 0;
6848         int modify_after_mirror = 0;
6849         const struct rte_flow_item *geneve_item = NULL;
6850         const struct rte_flow_item *gre_item = NULL;
6851         const struct rte_flow_item *gtp_item = NULL;
6852         const struct rte_flow_action_raw_decap *decap;
6853         const struct rte_flow_action_raw_encap *encap;
6854         const struct rte_flow_action_rss *rss = NULL;
6855         const struct rte_flow_action_rss *sample_rss = NULL;
6856         const struct rte_flow_action_count *sample_count = NULL;
6857         const struct rte_flow_item_tcp nic_tcp_mask = {
6858                 .hdr = {
6859                         .tcp_flags = 0xFF,
6860                         .src_port = RTE_BE16(UINT16_MAX),
6861                         .dst_port = RTE_BE16(UINT16_MAX),
6862                 }
6863         };
6864         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6865                 .hdr = {
6866                         .src_addr =
6867                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6868                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6869                         .dst_addr =
6870                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6871                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6872                         .vtc_flow = RTE_BE32(0xffffffff),
6873                         .proto = 0xff,
6874                         .hop_limits = 0xff,
6875                 },
6876                 .has_frag_ext = 1,
6877         };
6878         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6879                 .hdr = {
6880                         .common = {
6881                                 .u32 =
6882                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6883                                         .type = 0xFF,
6884                                         }).u32),
6885                         },
6886                         .dummy[0] = 0xffffffff,
6887                 },
6888         };
6889         struct mlx5_priv *priv = dev->data->dev_private;
6890         struct mlx5_sh_config *dev_conf = &priv->sh->config;
6891         uint16_t queue_index = 0xFFFF;
6892         const struct rte_flow_item_vlan *vlan_m = NULL;
6893         uint32_t rw_act_num = 0;
6894         uint64_t is_root;
6895         const struct mlx5_flow_tunnel *tunnel;
6896         enum mlx5_tof_rule_type tof_rule_type;
6897         struct flow_grp_info grp_info = {
6898                 .external = !!external,
6899                 .transfer = !!attr->transfer,
6900                 .fdb_def_rule = !!priv->fdb_def_rule,
6901                 .std_tbl_fix = true,
6902         };
6903         const struct rte_eth_hairpin_conf *conf;
6904         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
6905         const struct rte_flow_item *port_id_item = NULL;
6906         bool def_policy = false;
6907         uint16_t udp_dport = 0;
6908
6909         if (items == NULL)
6910                 return -1;
6911         tunnel = is_tunnel_offload_active(dev) ?
6912                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6913         if (tunnel) {
6914                 if (!dev_conf->dv_flow_en)
6915                         return rte_flow_error_set
6916                                 (error, ENOTSUP,
6917                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6918                                  NULL, "tunnel offload requires DV flow interface");
6919                 if (priv->representor)
6920                         return rte_flow_error_set
6921                                 (error, ENOTSUP,
6922                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6923                                  NULL, "decap not supported for VF representor");
6924                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6925                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6926                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6927                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6928                                         MLX5_FLOW_ACTION_DECAP;
6929                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6930                                         (dev, attr, tunnel, tof_rule_type);
6931         }
6932         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6933         if (ret < 0)
6934                 return ret;
6935         is_root = (uint64_t)ret;
6936         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6937                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6938                 int type = items->type;
6939
6940                 if (!mlx5_flow_os_item_supported(type))
6941                         return rte_flow_error_set(error, ENOTSUP,
6942                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6943                                                   NULL, "item not supported");
6944                 switch (type) {
6945                 case RTE_FLOW_ITEM_TYPE_VOID:
6946                         break;
6947                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6948                         ret = flow_dv_validate_item_port_id
6949                                         (dev, items, attr, item_flags, error);
6950                         if (ret < 0)
6951                                 return ret;
6952                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6953                         port_id_item = items;
6954                         break;
6955                 case RTE_FLOW_ITEM_TYPE_ETH:
6956                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6957                                                           true, error);
6958                         if (ret < 0)
6959                                 return ret;
6960                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6961                                              MLX5_FLOW_LAYER_OUTER_L2;
6962                         if (items->mask != NULL && items->spec != NULL) {
6963                                 ether_type =
6964                                         ((const struct rte_flow_item_eth *)
6965                                          items->spec)->type;
6966                                 ether_type &=
6967                                         ((const struct rte_flow_item_eth *)
6968                                          items->mask)->type;
6969                                 ether_type = rte_be_to_cpu_16(ether_type);
6970                         } else {
6971                                 ether_type = 0;
6972                         }
6973                         break;
6974                 case RTE_FLOW_ITEM_TYPE_VLAN:
6975                         ret = flow_dv_validate_item_vlan(items, item_flags,
6976                                                          dev, error);
6977                         if (ret < 0)
6978                                 return ret;
6979                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6980                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6981                         if (items->mask != NULL && items->spec != NULL) {
6982                                 ether_type =
6983                                         ((const struct rte_flow_item_vlan *)
6984                                          items->spec)->inner_type;
6985                                 ether_type &=
6986                                         ((const struct rte_flow_item_vlan *)
6987                                          items->mask)->inner_type;
6988                                 ether_type = rte_be_to_cpu_16(ether_type);
6989                         } else {
6990                                 ether_type = 0;
6991                         }
6992                         /* Store outer VLAN mask for of_push_vlan action. */
6993                         if (!tunnel)
6994                                 vlan_m = items->mask;
6995                         break;
6996                 case RTE_FLOW_ITEM_TYPE_IPV4:
6997                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6998                                                   &item_flags, &tunnel);
6999                         ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
7000                                                          last_item, ether_type,
7001                                                          error);
7002                         if (ret < 0)
7003                                 return ret;
7004                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
7005                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
7006                         if (items->mask != NULL &&
7007                             ((const struct rte_flow_item_ipv4 *)
7008                              items->mask)->hdr.next_proto_id) {
7009                                 next_protocol =
7010                                         ((const struct rte_flow_item_ipv4 *)
7011                                          (items->spec))->hdr.next_proto_id;
7012                                 next_protocol &=
7013                                         ((const struct rte_flow_item_ipv4 *)
7014                                          (items->mask))->hdr.next_proto_id;
7015                         } else {
7016                                 /* Reset for inner layer. */
7017                                 next_protocol = 0xff;
7018                         }
7019                         break;
7020                 case RTE_FLOW_ITEM_TYPE_IPV6:
7021                         mlx5_flow_tunnel_ip_check(items, next_protocol,
7022                                                   &item_flags, &tunnel);
7023                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
7024                                                            last_item,
7025                                                            ether_type,
7026                                                            &nic_ipv6_mask,
7027                                                            error);
7028                         if (ret < 0)
7029                                 return ret;
7030                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
7031                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
7032                         if (items->mask != NULL &&
7033                             ((const struct rte_flow_item_ipv6 *)
7034                              items->mask)->hdr.proto) {
7035                                 item_ipv6_proto =
7036                                         ((const struct rte_flow_item_ipv6 *)
7037                                          items->spec)->hdr.proto;
7038                                 next_protocol =
7039                                         ((const struct rte_flow_item_ipv6 *)
7040                                          items->spec)->hdr.proto;
7041                                 next_protocol &=
7042                                         ((const struct rte_flow_item_ipv6 *)
7043                                          items->mask)->hdr.proto;
7044                         } else {
7045                                 /* Reset for inner layer. */
7046                                 next_protocol = 0xff;
7047                         }
7048                         break;
7049                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
7050                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
7051                                                                   item_flags,
7052                                                                   error);
7053                         if (ret < 0)
7054                                 return ret;
7055                         last_item = tunnel ?
7056                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
7057                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
7058                         if (items->mask != NULL &&
7059                             ((const struct rte_flow_item_ipv6_frag_ext *)
7060                              items->mask)->hdr.next_header) {
7061                                 next_protocol =
7062                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7063                                  items->spec)->hdr.next_header;
7064                                 next_protocol &=
7065                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7066                                  items->mask)->hdr.next_header;
7067                         } else {
7068                                 /* Reset for inner layer. */
7069                                 next_protocol = 0xff;
7070                         }
7071                         break;
7072                 case RTE_FLOW_ITEM_TYPE_TCP:
7073                         ret = mlx5_flow_validate_item_tcp
7074                                                 (items, item_flags,
7075                                                  next_protocol,
7076                                                  &nic_tcp_mask,
7077                                                  error);
7078                         if (ret < 0)
7079                                 return ret;
7080                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7081                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
7082                         break;
7083                 case RTE_FLOW_ITEM_TYPE_UDP:
7084                         ret = mlx5_flow_validate_item_udp(items, item_flags,
7085                                                           next_protocol,
7086                                                           error);
7087                         if (ret < 0)
7088                                 return ret;
7089                         const struct rte_flow_item_udp *spec = items->spec;
7090                         const struct rte_flow_item_udp *mask = items->mask;
7091                         if (!mask)
7092                                 mask = &rte_flow_item_udp_mask;
7093                         if (spec != NULL)
7094                                 udp_dport = rte_be_to_cpu_16
7095                                                 (spec->hdr.dst_port &
7096                                                  mask->hdr.dst_port);
7097                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7098                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
7099                         break;
7100                 case RTE_FLOW_ITEM_TYPE_GRE:
7101                         ret = mlx5_flow_validate_item_gre(items, item_flags,
7102                                                           next_protocol, error);
7103                         if (ret < 0)
7104                                 return ret;
7105                         gre_item = items;
7106                         last_item = MLX5_FLOW_LAYER_GRE;
7107                         break;
7108                 case RTE_FLOW_ITEM_TYPE_NVGRE:
7109                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
7110                                                             next_protocol,
7111                                                             error);
7112                         if (ret < 0)
7113                                 return ret;
7114                         last_item = MLX5_FLOW_LAYER_NVGRE;
7115                         break;
7116                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7117                         ret = mlx5_flow_validate_item_gre_key
7118                                 (items, item_flags, gre_item, error);
7119                         if (ret < 0)
7120                                 return ret;
7121                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
7122                         break;
7123                 case RTE_FLOW_ITEM_TYPE_VXLAN:
7124                         ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
7125                                                             items, item_flags,
7126                                                             attr, error);
7127                         if (ret < 0)
7128                                 return ret;
7129                         last_item = MLX5_FLOW_LAYER_VXLAN;
7130                         break;
7131                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7132                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
7133                                                                 item_flags, dev,
7134                                                                 error);
7135                         if (ret < 0)
7136                                 return ret;
7137                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7138                         break;
7139                 case RTE_FLOW_ITEM_TYPE_GENEVE:
7140                         ret = mlx5_flow_validate_item_geneve(items,
7141                                                              item_flags, dev,
7142                                                              error);
7143                         if (ret < 0)
7144                                 return ret;
7145                         geneve_item = items;
7146                         last_item = MLX5_FLOW_LAYER_GENEVE;
7147                         break;
7148                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
7149                         ret = mlx5_flow_validate_item_geneve_opt(items,
7150                                                                  last_item,
7151                                                                  geneve_item,
7152                                                                  dev,
7153                                                                  error);
7154                         if (ret < 0)
7155                                 return ret;
7156                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
7157                         break;
7158                 case RTE_FLOW_ITEM_TYPE_MPLS:
7159                         ret = mlx5_flow_validate_item_mpls(dev, items,
7160                                                            item_flags,
7161                                                            last_item, error);
7162                         if (ret < 0)
7163                                 return ret;
7164                         last_item = MLX5_FLOW_LAYER_MPLS;
7165                         break;
7166
7167                 case RTE_FLOW_ITEM_TYPE_MARK:
7168                         ret = flow_dv_validate_item_mark(dev, items, attr,
7169                                                          error);
7170                         if (ret < 0)
7171                                 return ret;
7172                         last_item = MLX5_FLOW_ITEM_MARK;
7173                         break;
7174                 case RTE_FLOW_ITEM_TYPE_META:
7175                         ret = flow_dv_validate_item_meta(dev, items, attr,
7176                                                          error);
7177                         if (ret < 0)
7178                                 return ret;
7179                         last_item = MLX5_FLOW_ITEM_METADATA;
7180                         break;
7181                 case RTE_FLOW_ITEM_TYPE_ICMP:
7182                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
7183                                                            next_protocol,
7184                                                            error);
7185                         if (ret < 0)
7186                                 return ret;
7187                         last_item = MLX5_FLOW_LAYER_ICMP;
7188                         break;
7189                 case RTE_FLOW_ITEM_TYPE_ICMP6:
7190                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
7191                                                             next_protocol,
7192                                                             error);
7193                         if (ret < 0)
7194                                 return ret;
7195                         item_ipv6_proto = IPPROTO_ICMPV6;
7196                         last_item = MLX5_FLOW_LAYER_ICMP6;
7197                         break;
7198                 case RTE_FLOW_ITEM_TYPE_TAG:
7199                         ret = flow_dv_validate_item_tag(dev, items,
7200                                                         attr, error);
7201                         if (ret < 0)
7202                                 return ret;
7203                         last_item = MLX5_FLOW_ITEM_TAG;
7204                         break;
7205                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7206                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7207                         break;
7208                 case RTE_FLOW_ITEM_TYPE_GTP:
7209                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7210                                                         error);
7211                         if (ret < 0)
7212                                 return ret;
7213                         gtp_item = items;
7214                         last_item = MLX5_FLOW_LAYER_GTP;
7215                         break;
7216                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7217                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
7218                                                             gtp_item, attr,
7219                                                             error);
7220                         if (ret < 0)
7221                                 return ret;
7222                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
7223                         break;
7224                 case RTE_FLOW_ITEM_TYPE_ECPRI:
7225                         /* Capacity will be checked in the translate stage. */
7226                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7227                                                             last_item,
7228                                                             ether_type,
7229                                                             &nic_ecpri_mask,
7230                                                             error);
7231                         if (ret < 0)
7232                                 return ret;
7233                         last_item = MLX5_FLOW_LAYER_ECPRI;
7234                         break;
7235                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7236                         ret = flow_dv_validate_item_integrity(dev, items,
7237                                                               item_flags,
7238                                                               &last_item,
7239                                                               integrity_items,
7240                                                               error);
7241                         if (ret < 0)
7242                                 return ret;
7243                         break;
7244                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7245                         ret = flow_dv_validate_item_aso_ct(dev, items,
7246                                                            &item_flags, error);
7247                         if (ret < 0)
7248                                 return ret;
7249                         break;
7250                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7251                         /* The tunnel offload item was processed before;
7252                          * list it here as a supported type.
7253                          */
7254                         break;
7255                 case RTE_FLOW_ITEM_TYPE_FLEX:
7256                         ret = flow_dv_validate_item_flex(dev, items, item_flags,
7257                                                          &last_item,
7258                                                          tunnel != 0, error);
7259                         if (ret < 0)
7260                                 return ret;
7261                         break;
7262                 default:
7263                         return rte_flow_error_set(error, ENOTSUP,
7264                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7265                                                   NULL, "item not supported");
7266                 }
7267                 item_flags |= last_item;
7268         }
7269         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
7270                 ret = flow_dv_validate_item_integrity_post(integrity_items,
7271                                                            item_flags, error);
7272                 if (ret)
7273                         return ret;
7274         }
7275         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7276                 int type = actions->type;
7277                 bool shared_count = false;
7278
7279                 if (!mlx5_flow_os_action_supported(type))
7280                         return rte_flow_error_set(error, ENOTSUP,
7281                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7282                                                   actions,
7283                                                   "action not supported");
7284                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7285                         return rte_flow_error_set(error, ENOTSUP,
7286                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7287                                                   actions, "too many actions");
7288                 if (action_flags &
7289                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7290                         return rte_flow_error_set(error, ENOTSUP,
7291                                 RTE_FLOW_ERROR_TYPE_ACTION,
7292                                 NULL, "meter action with policy "
7293                                 "must be the last action");
7294                 switch (type) {
7295                 case RTE_FLOW_ACTION_TYPE_VOID:
7296                         break;
7297                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7298                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
7299                         ret = flow_dv_validate_action_port_id(dev,
7300                                                               action_flags,
7301                                                               actions,
7302                                                               attr,
7303                                                               error);
7304                         if (ret)
7305                                 return ret;
7306                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7307                         ++actions_n;
7308                         break;
7309                 case RTE_FLOW_ACTION_TYPE_FLAG:
7310                         ret = flow_dv_validate_action_flag(dev, action_flags,
7311                                                            attr, error);
7312                         if (ret < 0)
7313                                 return ret;
7314                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7315                                 /* Count all modify-header actions as one. */
7316                                 if (!(action_flags &
7317                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7318                                         ++actions_n;
7319                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7320                                                 MLX5_FLOW_ACTION_MARK_EXT;
7321                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7322                                         modify_after_mirror = 1;
7323
7324                         } else {
7325                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7326                                 ++actions_n;
7327                         }
7328                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7329                         break;
7330                 case RTE_FLOW_ACTION_TYPE_MARK:
7331                         ret = flow_dv_validate_action_mark(dev, actions,
7332                                                            action_flags,
7333                                                            attr, error);
7334                         if (ret < 0)
7335                                 return ret;
7336                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7337                                 /* Count all modify-header actions as one. */
7338                                 if (!(action_flags &
7339                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7340                                         ++actions_n;
7341                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7342                                                 MLX5_FLOW_ACTION_MARK_EXT;
7343                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7344                                         modify_after_mirror = 1;
7345                         } else {
7346                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7347                                 ++actions_n;
7348                         }
7349                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7350                         break;
7351                 case RTE_FLOW_ACTION_TYPE_SET_META:
7352                         ret = flow_dv_validate_action_set_meta(dev, actions,
7353                                                                action_flags,
7354                                                                attr, error);
7355                         if (ret < 0)
7356                                 return ret;
7357                         /* Count all modify-header actions as one action. */
7358                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7359                                 ++actions_n;
7360                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7361                                 modify_after_mirror = 1;
7362                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7363                         rw_act_num += MLX5_ACT_NUM_SET_META;
7364                         break;
7365                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7366                         ret = flow_dv_validate_action_set_tag(dev, actions,
7367                                                               action_flags,
7368                                                               attr, error);
7369                         if (ret < 0)
7370                                 return ret;
7371                         /* Count all modify-header actions as one action. */
7372                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7373                                 ++actions_n;
7374                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7375                                 modify_after_mirror = 1;
7376                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7377                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7378                         break;
7379                 case RTE_FLOW_ACTION_TYPE_DROP:
7380                         ret = mlx5_flow_validate_action_drop(action_flags,
7381                                                              attr, error);
7382                         if (ret < 0)
7383                                 return ret;
7384                         action_flags |= MLX5_FLOW_ACTION_DROP;
7385                         ++actions_n;
7386                         break;
7387                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7388                         ret = mlx5_flow_validate_action_queue(actions,
7389                                                               action_flags, dev,
7390                                                               attr, error);
7391                         if (ret < 0)
7392                                 return ret;
7393                         queue_index = ((const struct rte_flow_action_queue *)
7394                                                         (actions->conf))->index;
7395                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7396                         ++actions_n;
7397                         break;
7398                 case RTE_FLOW_ACTION_TYPE_RSS:
7399                         rss = actions->conf;
7400                         ret = mlx5_flow_validate_action_rss(actions,
7401                                                             action_flags, dev,
7402                                                             attr, item_flags,
7403                                                             error);
7404                         if (ret < 0)
7405                                 return ret;
7406                         if (rss && sample_rss &&
7407                             (sample_rss->level != rss->level ||
7408                             sample_rss->types != rss->types))
7409                                 return rte_flow_error_set(error, ENOTSUP,
7410                                         RTE_FLOW_ERROR_TYPE_ACTION,
7411                                         NULL,
7412                                         "Can't use different RSS types "
7413                                         "or levels in the same flow");
7414                         if (rss != NULL && rss->queue_num)
7415                                 queue_index = rss->queue[0];
7416                         action_flags |= MLX5_FLOW_ACTION_RSS;
7417                         ++actions_n;
7418                         break;
7419                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7420                         ret = mlx5_flow_validate_action_default_miss
7421                                                                (action_flags,
7422                                                                 attr, error);
7423                         if (ret < 0)
7424                                 return ret;
7425                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7426                         ++actions_n;
7427                         break;
7428                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7429                         shared_count = true;
7430                         /* fall-through. */
7431                 case RTE_FLOW_ACTION_TYPE_COUNT:
7432                         ret = flow_dv_validate_action_count(dev, shared_count,
7433                                                             action_flags,
7434                                                             error);
7435                         if (ret < 0)
7436                                 return ret;
7437                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7438                         ++actions_n;
7439                         break;
7440                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7441                         if (flow_dv_validate_action_pop_vlan(dev,
7442                                                              action_flags,
7443                                                              actions,
7444                                                              item_flags, attr,
7445                                                              error))
7446                                 return -rte_errno;
7447                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7448                                 modify_after_mirror = 1;
7449                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7450                         ++actions_n;
7451                         break;
7452                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7453                         ret = flow_dv_validate_action_push_vlan(dev,
7454                                                                 action_flags,
7455                                                                 vlan_m,
7456                                                                 actions, attr,
7457                                                                 error);
7458                         if (ret < 0)
7459                                 return ret;
7460                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7461                                 modify_after_mirror = 1;
7462                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7463                         ++actions_n;
7464                         break;
7465                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7466                         ret = flow_dv_validate_action_set_vlan_pcp
7467                                                 (action_flags, actions, error);
7468                         if (ret < 0)
7469                                 return ret;
7470                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7471                                 modify_after_mirror = 1;
7472                         /* Count PCP with push_vlan command. */
7473                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7474                         break;
7475                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7476                         ret = flow_dv_validate_action_set_vlan_vid
7477                                                 (item_flags, action_flags,
7478                                                  actions, error);
7479                         if (ret < 0)
7480                                 return ret;
7481                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7482                                 modify_after_mirror = 1;
7483                         /* Count VID with push_vlan command. */
7484                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7485                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7486                         break;
7487                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7488                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7489                         ret = flow_dv_validate_action_l2_encap(dev,
7490                                                                action_flags,
7491                                                                actions, attr,
7492                                                                error);
7493                         if (ret < 0)
7494                                 return ret;
7495                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7496                         ++actions_n;
7497                         break;
7498                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7499                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7500                         ret = flow_dv_validate_action_decap(dev, action_flags,
7501                                                             actions, item_flags,
7502                                                             attr, error);
7503                         if (ret < 0)
7504                                 return ret;
7505                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7506                                 modify_after_mirror = 1;
7507                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7508                         ++actions_n;
7509                         break;
7510                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7511                         ret = flow_dv_validate_action_raw_encap_decap
7512                                 (dev, NULL, actions->conf, attr, &action_flags,
7513                                  &actions_n, actions, item_flags, error);
7514                         if (ret < 0)
7515                                 return ret;
7516                         break;
7517                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7518                         decap = actions->conf;
7519                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7520                                 ;
7521                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7522                                 encap = NULL;
7523                                 actions--;
7524                         } else {
7525                                 encap = actions->conf;
7526                         }
7527                         ret = flow_dv_validate_action_raw_encap_decap
7528                                            (dev,
7529                                             decap ? decap : &empty_decap, encap,
7530                                             attr, &action_flags, &actions_n,
7531                                             actions, item_flags, error);
7532                         if (ret < 0)
7533                                 return ret;
7534                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7535                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7536                                 modify_after_mirror = 1;
7537                         break;
7538                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7539                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7540                         ret = flow_dv_validate_action_modify_mac(action_flags,
7541                                                                  actions,
7542                                                                  item_flags,
7543                                                                  error);
7544                         if (ret < 0)
7545                                 return ret;
7546                         /* Count all modify-header actions as one action. */
7547                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7548                                 ++actions_n;
7549                         action_flags |= actions->type ==
7550                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7551                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7552                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7553                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7554                                 modify_after_mirror = 1;
7555                         /*
7556                          * Even if the source and destination MAC addresses
7557                          * overlap in the header with 4B alignment, the convert
7558                          * function handles them separately and creates 4 SW
7559                          * actions. And 2 actions are added each time, no
7560                          * matter how many bytes of the address are set.
7561                          */
7562                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7563                         break;
7564                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7565                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7566                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7567                                                                   actions,
7568                                                                   item_flags,
7569                                                                   error);
7570                         if (ret < 0)
7571                                 return ret;
7572                         /* Count all modify-header actions as one action. */
7573                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7574                                 ++actions_n;
7575                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7576                                 modify_after_mirror = 1;
7577                         action_flags |= actions->type ==
7578                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7579                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7580                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7581                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7582                         break;
7583                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7584                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7585                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7586                                                                   actions,
7587                                                                   item_flags,
7588                                                                   error);
7589                         if (ret < 0)
7590                                 return ret;
7591                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7592                                 return rte_flow_error_set(error, ENOTSUP,
7593                                         RTE_FLOW_ERROR_TYPE_ACTION,
7594                                         actions,
7595                                         "Can't change header "
7596                                         "with ICMPv6 proto");
7597                         /* Count all modify-header actions as one action. */
7598                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7599                                 ++actions_n;
7600                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7601                                 modify_after_mirror = 1;
7602                         action_flags |= actions->type ==
7603                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7604                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7605                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7606                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7607                         break;
7608                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7609                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7610                         ret = flow_dv_validate_action_modify_tp(action_flags,
7611                                                                 actions,
7612                                                                 item_flags,
7613                                                                 error);
7614                         if (ret < 0)
7615                                 return ret;
7616                         /* Count all modify-header actions as one action. */
7617                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7618                                 ++actions_n;
7619                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7620                                 modify_after_mirror = 1;
7621                         action_flags |= actions->type ==
7622                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7623                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7624                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7625                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7626                         break;
7627                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7628                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7629                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7630                                                                  actions,
7631                                                                  item_flags,
7632                                                                  error);
7633                         if (ret < 0)
7634                                 return ret;
7635                         /* Count all modify-header actions as one action. */
7636                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7637                                 ++actions_n;
7638                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7639                                 modify_after_mirror = 1;
7640                         action_flags |= actions->type ==
7641                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7642                                                 MLX5_FLOW_ACTION_SET_TTL :
7643                                                 MLX5_FLOW_ACTION_DEC_TTL;
7644                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7645                         break;
7646                 case RTE_FLOW_ACTION_TYPE_JUMP:
7647                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7648                                                            action_flags,
7649                                                            attr, external,
7650                                                            error);
7651                         if (ret)
7652                                 return ret;
7653                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7654                             fdb_mirror_limit)
7655                                 return rte_flow_error_set(error, EINVAL,
7656                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7657                                                   NULL,
7658                                                   "sample and jump action combination is not supported");
7659                         ++actions_n;
7660                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7661                         break;
7662                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7663                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7664                         ret = flow_dv_validate_action_modify_tcp_seq
7665                                                                 (action_flags,
7666                                                                  actions,
7667                                                                  item_flags,
7668                                                                  error);
7669                         if (ret < 0)
7670                                 return ret;
7671                         /* Count all modify-header actions as one action. */
7672                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7673                                 ++actions_n;
7674                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7675                                 modify_after_mirror = 1;
7676                         action_flags |= actions->type ==
7677                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7678                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7679                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7680                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7681                         break;
7682                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7683                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7684                         ret = flow_dv_validate_action_modify_tcp_ack
7685                                                                 (action_flags,
7686                                                                  actions,
7687                                                                  item_flags,
7688                                                                  error);
7689                         if (ret < 0)
7690                                 return ret;
7691                         /* Count all modify-header actions as one action. */
7692                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7693                                 ++actions_n;
7694                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7695                                 modify_after_mirror = 1;
7696                         action_flags |= actions->type ==
7697                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7698                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7699                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7700                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7701                         break;
7702                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7703                         break;
7704                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7705                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7706                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7707                         break;
7708                 case RTE_FLOW_ACTION_TYPE_METER:
7709                         ret = mlx5_flow_validate_action_meter(dev,
7710                                                               action_flags,
7711                                                               item_flags,
7712                                                               actions, attr,
7713                                                               port_id_item,
7714                                                               &def_policy,
7715                                                               error);
7716                         if (ret < 0)
7717                                 return ret;
7718                         action_flags |= MLX5_FLOW_ACTION_METER;
7719                         if (!def_policy)
7720                                 action_flags |=
7721                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7722                         ++actions_n;
7723                         /* Meter action will add one more TAG action. */
7724                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7725                         break;
7726                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7727                         if (!attr->transfer && !attr->group)
7728                                 return rte_flow_error_set(error, ENOTSUP,
7729                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7730                                                 NULL,
7731                                                 "Shared ASO age action is not supported for group 0");
7732                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7733                                 return rte_flow_error_set
7734                                                   (error, EINVAL,
7735                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7736                                                    NULL,
7737                                                    "duplicate age actions set");
7738                         action_flags |= MLX5_FLOW_ACTION_AGE;
7739                         ++actions_n;
7740                         break;
7741                 case RTE_FLOW_ACTION_TYPE_AGE:
7742                         ret = flow_dv_validate_action_age(action_flags,
7743                                                           actions, dev,
7744                                                           error);
7745                         if (ret < 0)
7746                                 return ret;
7747                         /*
7748                          * Validate that the regular AGE action (using a counter)
7749                          * is mutually exclusive with shared count actions.
7750                          */
7751                         if (!priv->sh->flow_hit_aso_en) {
7752                                 if (shared_count)
7753                                         return rte_flow_error_set
7754                                                 (error, EINVAL,
7755                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7756                                                 NULL,
7757                                                 "old age and shared count combination is not supported");
7758                                 if (sample_count)
7759                                         return rte_flow_error_set
7760                                                 (error, EINVAL,
7761                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7762                                                 NULL,
7763                                                 "old age action and count must be in the same sub flow");
7764                         }
7765                         action_flags |= MLX5_FLOW_ACTION_AGE;
7766                         ++actions_n;
7767                         break;
7768                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7769                         ret = flow_dv_validate_action_modify_ipv4_dscp
7770                                                          (action_flags,
7771                                                           actions,
7772                                                           item_flags,
7773                                                           error);
7774                         if (ret < 0)
7775                                 return ret;
7776                         /* Count all modify-header actions as one action. */
7777                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7778                                 ++actions_n;
7779                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7780                                 modify_after_mirror = 1;
7781                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7782                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7783                         break;
7784                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7785                         ret = flow_dv_validate_action_modify_ipv6_dscp
7786                                                                 (action_flags,
7787                                                                  actions,
7788                                                                  item_flags,
7789                                                                  error);
7790                         if (ret < 0)
7791                                 return ret;
7792                         /* Count all modify-header actions as one action. */
7793                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7794                                 ++actions_n;
7795                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7796                                 modify_after_mirror = 1;
7797                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7798                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7799                         break;
7800                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7801                         ret = flow_dv_validate_action_sample(&action_flags,
7802                                                              actions, dev,
7803                                                              attr, item_flags,
7804                                                              rss, &sample_rss,
7805                                                              &sample_count,
7806                                                              &fdb_mirror_limit,
7807                                                              error);
7808                         if (ret < 0)
7809                                 return ret;
7810                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7811                         ++actions_n;
7812                         break;
7813                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7814                         ret = flow_dv_validate_action_modify_field(dev,
7815                                                                    action_flags,
7816                                                                    actions,
7817                                                                    attr,
7818                                                                    error);
7819                         if (ret < 0)
7820                                 return ret;
7821                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7822                                 modify_after_mirror = 1;
7823                         /* Count all modify-header actions as one action. */
7824                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7825                                 ++actions_n;
7826                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7827                         rw_act_num += ret;
7828                         break;
7829                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7830                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7831                                                              item_flags, attr,
7832                                                              error);
7833                         if (ret < 0)
7834                                 return ret;
7835                         action_flags |= MLX5_FLOW_ACTION_CT;
7836                         break;
7837                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7838                         /* The tunnel offload action was processed before;
7839                          * list it here as a supported type.
7840                          */
7841                         break;
7842                 default:
7843                         return rte_flow_error_set(error, ENOTSUP,
7844                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7845                                                   actions,
7846                                                   "action not supported");
7847                 }
7848         }
7849         /*
7850          * Validate actions in tunnel offload flow rules:
7851          * - Explicit decap action is prohibited by the tunnel offload API.
7852          * - Drop action in a tunnel steer rule is prohibited by the API.
7853          * - Application cannot use MARK action because its value can mask
7854          *   the tunnel default miss notification.
7855          * - JUMP in a tunnel match rule is not supported by the current PMD
7856          *   implementation.
7857          * - TAG & META are reserved for future uses.
7858          */
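        /*
         * Illustrative sketch (not part of the upstream source): under
         * tunnel offload, a tunnel set rule carrying
         *
         *     struct rte_flow_action bad[] = {
         *             { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
         *             { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
         *             { .type = RTE_FLOW_ACTION_TYPE_END },
         *     };
         *
         * is rejected by the checks below because of MARK, while a rule
         * terminating with JUMP alone passes. "mark" and "jump" stand for
         * pre-filled action configurations and are assumed here.
         */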
7859         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7860                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7861                                             MLX5_FLOW_ACTION_MARK     |
7862                                             MLX5_FLOW_ACTION_SET_TAG  |
7863                                             MLX5_FLOW_ACTION_SET_META |
7864                                             MLX5_FLOW_ACTION_DROP;
7865
7866                 if (action_flags & bad_actions_mask)
7867                         return rte_flow_error_set
7868                                         (error, EINVAL,
7869                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7870                                         "Invalid RTE action in tunnel "
7871                                         "set decap rule");
7872                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7873                         return rte_flow_error_set
7874                                         (error, EINVAL,
7875                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7876                                         "tunnel set decap rule must terminate "
7877                                         "with JUMP");
7878                 if (!attr->ingress)
7879                         return rte_flow_error_set
7880                                         (error, EINVAL,
7881                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7882                                         "tunnel flows for ingress traffic only");
7883         }
7884         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7885                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7886                                             MLX5_FLOW_ACTION_MARK    |
7887                                             MLX5_FLOW_ACTION_SET_TAG |
7888                                             MLX5_FLOW_ACTION_SET_META;
7889
7890                 if (action_flags & bad_actions_mask)
7891                         return rte_flow_error_set
7892                                         (error, EINVAL,
7893                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7894                                         "Invalid RTE action in tunnel "
7895                                         "set match rule");
7896         }
7897         /*
7898          * Validate the drop action mutual exclusion with other actions.
7899          * Drop action is mutually-exclusive with any other action, except for
7900          * Count action.
7901          * Drop action compatibility with tunnel offload was already validated.
7902          */
7903         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7904                             MLX5_FLOW_ACTION_TUNNEL_SET));
7905         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7906             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7907                 return rte_flow_error_set(error, EINVAL,
7908                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7909                                           "Drop action is mutually-exclusive "
7910                                           "with any other action, except for "
7911                                           "Count action");
7912         /* Eswitch has a few restrictions on using items and actions. */
7913         if (attr->transfer) {
7914                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7915                     action_flags & MLX5_FLOW_ACTION_FLAG)
7916                         return rte_flow_error_set(error, ENOTSUP,
7917                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7918                                                   NULL,
7919                                                   "unsupported action FLAG");
7920                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7921                     action_flags & MLX5_FLOW_ACTION_MARK)
7922                         return rte_flow_error_set(error, ENOTSUP,
7923                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7924                                                   NULL,
7925                                                   "unsupported action MARK");
7926                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7927                         return rte_flow_error_set(error, ENOTSUP,
7928                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7929                                                   NULL,
7930                                                   "unsupported action QUEUE");
7931                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7932                         return rte_flow_error_set(error, ENOTSUP,
7933                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7934                                                   NULL,
7935                                                   "unsupported action RSS");
7936                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7937                         return rte_flow_error_set(error, EINVAL,
7938                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7939                                                   actions,
7940                                                   "no fate action is found");
7941         } else {
7942                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7943                         return rte_flow_error_set(error, EINVAL,
7944                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7945                                                   actions,
7946                                                   "no fate action is found");
7947         }
7948         /*
7949          * Continue validation for Xcap and VLAN actions.
7950          * If hairpin works in explicit TX rule mode, there is no action
7951          * splitting and the validation of a hairpin ingress flow should be
7952          * the same as for other standard flows.
7953          */
7954         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7955                              MLX5_FLOW_VLAN_ACTIONS)) &&
7956             (queue_index == 0xFFFF ||
7957              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7958              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7959              conf->tx_explicit != 0))) {
7960                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7961                     MLX5_FLOW_XCAP_ACTIONS)
7962                         return rte_flow_error_set(error, ENOTSUP,
7963                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7964                                                   NULL, "encap and decap "
7965                                                   "combination is not supported");
7966                 if (!attr->transfer && attr->ingress) {
7967                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7968                                 return rte_flow_error_set
7969                                                 (error, ENOTSUP,
7970                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7971                                                  NULL, "encap is not supported"
7972                                                  " for ingress traffic");
7973                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7974                                 return rte_flow_error_set
7975                                                 (error, ENOTSUP,
7976                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7977                                                  NULL, "push VLAN action not "
7978                                                  "supported for ingress");
7979                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7980                                         MLX5_FLOW_VLAN_ACTIONS)
7981                                 return rte_flow_error_set
7982                                                 (error, ENOTSUP,
7983                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7984                                                  NULL, "no support for "
7985                                                  "multiple VLAN actions");
7986                 }
7987         }
7988         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7989                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7990                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7991                         attr->ingress)
7992                         return rte_flow_error_set
7993                                 (error, ENOTSUP,
7994                                 RTE_FLOW_ERROR_TYPE_ACTION,
7995                                 NULL, "fate action not supported for "
7996                                 "meter with policy");
7997                 if (attr->egress) {
7998                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7999                                 return rte_flow_error_set
8000                                         (error, ENOTSUP,
8001                                         RTE_FLOW_ERROR_TYPE_ACTION,
8002                                         NULL, "modify header action in egress "
8003                                         "cannot be done before meter action");
8004                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
8005                                 return rte_flow_error_set
8006                                         (error, ENOTSUP,
8007                                         RTE_FLOW_ERROR_TYPE_ACTION,
8008                                         NULL, "encap action in egress "
8009                                         "cannot be done before meter action");
8010                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
8011                                 return rte_flow_error_set
8012                                         (error, ENOTSUP,
8013                                         RTE_FLOW_ERROR_TYPE_ACTION,
8014                                         NULL, "push vlan action in egress "
8015                                         "cannot be done before meter action");
8016                 }
8017         }
8018         /*
8019          * Hairpin flow will add one more TAG action in TX implicit mode.
8020          * In TX explicit mode, there will be no hairpin flow ID.
8021          */
8022         if (hairpin > 0)
8023                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8024         /* Extra metadata enabled: one more TAG action will be added. */
8025         if (dev_conf->dv_flow_en &&
8026             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
8027             mlx5_flow_ext_mreg_supported(dev))
8028                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8029         if (rw_act_num >
8030                         flow_dv_modify_hdr_action_max(dev, is_root)) {
8031                 return rte_flow_error_set(error, ENOTSUP,
8032                                           RTE_FLOW_ERROR_TYPE_ACTION,
8033                                           NULL, "too many header modify"
8034                                           " actions to support");
8035         }
8036         /* Eswitch egress mirror-and-modify flows have a limitation on CX5. */
8037         if (fdb_mirror_limit && modify_after_mirror)
8038                 return rte_flow_error_set(error, EINVAL,
8039                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8040                                 "sample before modify action is not supported");
8041         return 0;
8042 }
8043
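/*
 * Illustrative usage sketch (assumed application code, not part of the
 * driver): the validation above runs when an application validates a rule.
 * A DROP + COUNT combination is the only pairing the drop mutual-exclusion
 * check permits:
 *
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *             { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *     int rc = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 *
 * "port_id" stands for an already-configured ethdev port.
 */
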
8044 /**
8045  * Internal preparation function. Allocates the DV flow size;
8046  * this size is constant.
8047  *
8048  * @param[in] dev
8049  *   Pointer to the rte_eth_dev structure.
8050  * @param[in] attr
8051  *   Pointer to the flow attributes.
8052  * @param[in] items
8053  *   Pointer to the list of items.
8054  * @param[in] actions
8055  *   Pointer to the list of actions.
8056  * @param[out] error
8057  *   Pointer to the error structure.
8058  *
8059  * @return
8060  *   Pointer to mlx5_flow object on success,
8061  *   otherwise NULL and rte_errno is set.
8062  */
8063 static struct mlx5_flow *
8064 flow_dv_prepare(struct rte_eth_dev *dev,
8065                 const struct rte_flow_attr *attr __rte_unused,
8066                 const struct rte_flow_item items[] __rte_unused,
8067                 const struct rte_flow_action actions[] __rte_unused,
8068                 struct rte_flow_error *error)
8069 {
8070         uint32_t handle_idx = 0;
8071         struct mlx5_flow *dev_flow;
8072         struct mlx5_flow_handle *dev_handle;
8073         struct mlx5_priv *priv = dev->data->dev_private;
8074         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8075
8076         MLX5_ASSERT(wks);
8077         wks->skip_matcher_reg = 0;
8078         wks->policy = NULL;
8079         wks->final_policy = NULL;
8080         /* Guard against overflowing the per-thread flow array. */
8081         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
8082                 rte_flow_error_set(error, ENOSPC,
8083                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8084                                    "no free temporary device flow");
8085                 return NULL;
8086         }
8087         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
8088                                    &handle_idx);
8089         if (!dev_handle) {
8090                 rte_flow_error_set(error, ENOMEM,
8091                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8092                                    "not enough memory to create flow handle");
8093                 return NULL;
8094         }
8095         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
8096         dev_flow = &wks->flows[wks->flow_idx++];
8097         memset(dev_flow, 0, sizeof(*dev_flow));
8098         dev_flow->handle = dev_handle;
8099         dev_flow->handle_idx = handle_idx;
8100         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
8101         dev_flow->ingress = attr->ingress;
8102         dev_flow->dv.transfer = attr->transfer;
8103         return dev_flow;
8104 }
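
/*
 * Usage sketch (illustrative only; the surrounding local names are
 * assumptions): a translate stage obtains a device flow from the
 * per-thread workspace and fills the matcher value afterwards.
 *
 * @code
 * struct rte_flow_error err;
 * struct mlx5_flow *df;
 *
 * df = flow_dv_prepare(dev, attr, items, actions, &err);
 * if (df == NULL)
 *         return -rte_errno;
 * @endcode
 *
 * The workspace holds at most MLX5_NUM_MAX_DEV_FLOWS entries, which is
 * why the index is checked before a new slot is taken.
 */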
8105
8106 #ifdef RTE_LIBRTE_MLX5_DEBUG
8107 /**
8108  * Sanity check for match mask and value. Similar to check_valid_spec() in
8109  * the kernel driver. If an unmasked bit is present in the value, it fails.
8110  *
8111  * @param match_mask
8112  *   pointer to match mask buffer.
8113  * @param match_value
8114  *   pointer to match value buffer.
8115  *
8116  * @return
8117  *   0 if valid, -EINVAL otherwise.
8118  */
8119 static int
8120 flow_dv_check_valid_spec(void *match_mask, void *match_value)
8121 {
8122         uint8_t *m = match_mask;
8123         uint8_t *v = match_value;
8124         unsigned int i;
8125
8126         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
8127                 if (v[i] & ~m[i]) {
8128                         DRV_LOG(ERR,
8129                                 "match_value differs from match_criteria"
8130                                 " %p[%u] != %p[%u]",
8131                                 match_value, i, match_mask, i);
8132                         return -EINVAL;
8133                 }
8134         }
8135         return 0;
8136 }
8137 #endif
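
/*
 * Example of the invariant enforced above (illustrative byte values):
 * a value bit may only be set where the mask enables it.
 *
 * @code
 * uint8_t m = 0x0f;
 * uint8_t good = 0x0a;
 * uint8_t bad = 0x1a;
 *
 * MLX5_ASSERT((good & ~m) == 0);
 * MLX5_ASSERT((bad & ~m) != 0);
 * @endcode
 */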
8138
8139 /**
8140  * Add match of ip_version.
8141  *
8142  * @param[in] group
8143  *   Flow group.
8144  * @param[in] headers_v
8145  *   Values header pointer.
8146  * @param[in] headers_m
8147  *   Masks header pointer.
8148  * @param[in] ip_version
8149  *   The IP version to set.
8150  */
8151 static inline void
8152 flow_dv_set_match_ip_version(uint32_t group,
8153                              void *headers_v,
8154                              void *headers_m,
8155                              uint8_t ip_version)
8156 {
8157         if (group == 0)
8158                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8159         else
8160                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8161                          ip_version);
8162         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8163         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8164         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8165 }
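
/*
 * Illustration of the group-0 special case above: on the root table the
 * mask is widened to 0xf so that, e.g., an IPv4 rule cannot match IPv6
 * packets; on non-root groups the version value doubles as its own mask.
 * A minimal sketch, assuming headers_m/headers_v point into an
 * fte_match_param buffer:
 *
 * @code
 * flow_dv_set_match_ip_version(0, headers_v, headers_m, 4);
 * MLX5_ASSERT(MLX5_GET(fte_match_set_lyr_2_4, headers_m,
 *                      ip_version) == 0xf);
 * @endcode
 */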
8166
8167 /**
8168  * Add Ethernet item to matcher and to the value.
8169  *
8170  * @param[in, out] matcher
8171  *   Flow matcher.
8172  * @param[in, out] key
8173  *   Flow matcher value.
8174  * @param[in] item
8175  *   Flow pattern to translate.
8176  * @param[in] inner
8177  *   Item is inner pattern.
8178  * @param[in] group
 *   The group to insert the rule.
 */
8179 static void
8180 flow_dv_translate_item_eth(void *matcher, void *key,
8181                            const struct rte_flow_item *item, int inner,
8182                            uint32_t group)
8183 {
8184         const struct rte_flow_item_eth *eth_m = item->mask;
8185         const struct rte_flow_item_eth *eth_v = item->spec;
8186         const struct rte_flow_item_eth nic_mask = {
8187                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8188                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8189                 .type = RTE_BE16(0xffff),
8190                 .has_vlan = 0,
8191         };
8192         void *hdrs_m;
8193         void *hdrs_v;
8194         char *l24_v;
8195         unsigned int i;
8196
8197         if (!eth_v)
8198                 return;
8199         if (!eth_m)
8200                 eth_m = &nic_mask;
8201         if (inner) {
8202                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8203                                          inner_headers);
8204                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8205         } else {
8206                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8207                                          outer_headers);
8208                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8209         }
8210         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8211                &eth_m->dst, sizeof(eth_m->dst));
8212         /* The value must be in the range of the mask. */
8213         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8214         for (i = 0; i < sizeof(eth_m->dst); ++i)
8215                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8216         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8217                &eth_m->src, sizeof(eth_m->src));
8218         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8219         /* The value must be in the range of the mask. */
8220         for (i = 0; i < sizeof(eth_m->src); ++i)
8221                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8222         /*
8223          * HW supports match on one Ethertype, the Ethertype following the last
8224          * VLAN tag of the packet (see PRM).
8225          * Set match on ethertype only if ETH header is not followed by VLAN.
8226          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8227          * ethertype, and use ip_version field instead.
8228          * eCPRI over Ether layer will use type value 0xAEFE.
8229          */
8230         if (eth_m->type == 0xFFFF) {
8231                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
8232                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8233                 switch (eth_v->type) {
8234                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8235                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8236                         return;
8237                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8238                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8239                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8240                         return;
8241                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8242                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8243                         return;
8244                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8245                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8246                         return;
8247                 default:
8248                         break;
8249                 }
8250         }
8251         if (eth_m->has_vlan) {
8252                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8253                 if (eth_v->has_vlan) {
8254                         /*
8255                          * Here, when also has_more_vlan field in VLAN item is
8256                          * not set, only single-tagged packets will be matched.
8257                          */
8258                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8259                         return;
8260                 }
8261         }
8262         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8263                  rte_be_to_cpu_16(eth_m->type));
8264         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8265         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8266 }
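
/*
 * Example pattern (illustrative): a fully-masked IPv4 ethertype takes
 * the ip_version shortcut above instead of matching the ethertype field
 * directly. The matcher/key/group variables are assumed to come from
 * the surrounding translation context.
 *
 * @code
 * struct rte_flow_item_eth spec = {
 *         .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 * };
 * struct rte_flow_item_eth mask = {
 *         .type = RTE_BE16(0xffff),
 * };
 * struct rte_flow_item item = {
 *         .type = RTE_FLOW_ITEM_TYPE_ETH,
 *         .spec = &spec,
 *         .mask = &mask,
 * };
 *
 * flow_dv_translate_item_eth(matcher, key, &item, 0, group);
 * @endcode
 */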
8267
8268 /**
8269  * Add VLAN item to matcher and to the value.
8270  *
8271  * @param[in, out] dev_flow
8272  *   Flow descriptor.
8273  * @param[in, out] matcher
8274  *   Flow matcher.
8275  * @param[in, out] key
8276  *   Flow matcher value.
8277  * @param[in] item
8278  *   Flow pattern to translate.
8279  * @param[in] inner
8280  *   Item is inner pattern.
8281  * @param[in] group
 *   The group to insert the rule.
 */
8282 static void
8283 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8284                             void *matcher, void *key,
8285                             const struct rte_flow_item *item,
8286                             int inner, uint32_t group)
8287 {
8288         const struct rte_flow_item_vlan *vlan_m = item->mask;
8289         const struct rte_flow_item_vlan *vlan_v = item->spec;
8290         void *hdrs_m;
8291         void *hdrs_v;
8292         uint16_t tci_m;
8293         uint16_t tci_v;
8294
8295         if (inner) {
8296                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8297                                          inner_headers);
8298                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8299         } else {
8300                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8301                                          outer_headers);
8302                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8303                 /*
8304                  * This is a workaround. Masks are not supported
8305                  * and have been pre-validated.
8306                  */
8307                 if (vlan_v)
8308                         dev_flow->handle->vf_vlan.tag =
8309                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8310         }
8311         /*
8312          * When VLAN item exists in flow, mark packet as tagged,
8313          * even if TCI is not specified.
8314          */
8315         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8316                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8317                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8318         }
8319         if (!vlan_v)
8320                 return;
8321         if (!vlan_m)
8322                 vlan_m = &rte_flow_item_vlan_mask;
8323         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8324         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8325         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8326         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8327         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8328         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8329         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8330         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8331         /*
8332          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8333          * ethertype, and use ip_version field instead.
8334          */
8335         if (vlan_m->inner_type == 0xFFFF) {
8336                 switch (vlan_v->inner_type) {
8337                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8338                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8339                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8340                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8341                         return;
8342                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8343                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8344                         return;
8345                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8346                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8347                         return;
8348                 default:
8349                         break;
8350                 }
8351         }
8352         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8353                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8354                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8355                 /* Only one vlan_tag bit can be set. */
8356                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8357                 return;
8358         }
8359         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8360                  rte_be_to_cpu_16(vlan_m->inner_type));
8361         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8362                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8363 }
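
/*
 * TCI decomposition used above, on the host-endian value obtained from
 * rte_be_to_cpu_16(): bits 0-11 carry the VID, bit 12 the CFI/DEI and
 * bits 13-15 the priority. MLX5_SET() truncates each shifted value to
 * its field width, so no extra masking is needed. For example:
 *
 * @code
 * uint16_t tci = 0x6123;
 * uint16_t vid = tci & 0x0fff;
 * uint16_t cfi = (tci >> 12) & 0x1;
 * uint16_t prio = tci >> 13;
 * @endcode
 *
 * yielding vid == 0x123, cfi == 0 and prio == 3.
 */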
8364
8365 /**
8366  * Add IPV4 item to matcher and to the value.
8367  *
8368  * @param[in, out] matcher
8369  *   Flow matcher.
8370  * @param[in, out] key
8371  *   Flow matcher value.
8372  * @param[in] item
8373  *   Flow pattern to translate.
8374  * @param[in] inner
8375  *   Item is inner pattern.
8376  * @param[in] group
8377  *   The group to insert the rule.
8378  */
8379 static void
8380 flow_dv_translate_item_ipv4(void *matcher, void *key,
8381                             const struct rte_flow_item *item,
8382                             int inner, uint32_t group)
8383 {
8384         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8385         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8386         const struct rte_flow_item_ipv4 nic_mask = {
8387                 .hdr = {
8388                         .src_addr = RTE_BE32(0xffffffff),
8389                         .dst_addr = RTE_BE32(0xffffffff),
8390                         .type_of_service = 0xff,
8391                         .next_proto_id = 0xff,
8392                         .time_to_live = 0xff,
8393                 },
8394         };
8395         void *headers_m;
8396         void *headers_v;
8397         char *l24_m;
8398         char *l24_v;
8399         uint8_t tos, ihl_m, ihl_v;
8400
8401         if (inner) {
8402                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8403                                          inner_headers);
8404                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8405         } else {
8406                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8407                                          outer_headers);
8408                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8409         }
8410         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8411         if (!ipv4_v)
8412                 return;
8413         if (!ipv4_m)
8414                 ipv4_m = &nic_mask;
8415         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8416                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8417         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8418                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8419         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8420         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8421         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8422                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8423         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8424                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8425         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8426         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8427         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8428         ihl_m = ipv4_m->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8429         ihl_v = ipv4_v->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8430         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_ihl, ihl_m);
8431         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, ihl_m & ihl_v);
8432         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8433                  ipv4_m->hdr.type_of_service);
8434         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8435         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8436                  ipv4_m->hdr.type_of_service >> 2);
8437         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8438         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8439                  ipv4_m->hdr.next_proto_id);
8440         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8441                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8442         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8443                  ipv4_m->hdr.time_to_live);
8444         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8445                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8446         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8447                  !!(ipv4_m->hdr.fragment_offset));
8448         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8449                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8450 }
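
/*
 * Example (illustrative addresses): matching sources in 192.168.0.0/16
 * writes the mask verbatim and the value ANDed with the mask, which
 * keeps the match value inside the advertised mask as required by
 * flow_dv_check_valid_spec().
 *
 * @code
 * struct rte_flow_item_ipv4 spec = {
 *         .hdr = { .src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 5)) },
 * };
 * struct rte_flow_item_ipv4 mask = {
 *         .hdr = { .src_addr = RTE_BE32(0xffff0000) },
 * };
 * @endcode
 *
 * The value actually programmed corresponds to 192.168.0.0.
 */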
8451
8452 /**
8453  * Add IPV6 item to matcher and to the value.
8454  *
8455  * @param[in, out] matcher
8456  *   Flow matcher.
8457  * @param[in, out] key
8458  *   Flow matcher value.
8459  * @param[in] item
8460  *   Flow pattern to translate.
8461  * @param[in] inner
8462  *   Item is inner pattern.
8463  * @param[in] group
8464  *   The group to insert the rule.
8465  */
8466 static void
8467 flow_dv_translate_item_ipv6(void *matcher, void *key,
8468                             const struct rte_flow_item *item,
8469                             int inner, uint32_t group)
8470 {
8471         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8472         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8473         const struct rte_flow_item_ipv6 nic_mask = {
8474                 .hdr = {
8475                         .src_addr =
8476                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8477                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8478                         .dst_addr =
8479                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8480                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8481                         .vtc_flow = RTE_BE32(0xffffffff),
8482                         .proto = 0xff,
8483                         .hop_limits = 0xff,
8484                 },
8485         };
8486         void *headers_m;
8487         void *headers_v;
8488         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8489         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8490         char *l24_m;
8491         char *l24_v;
8492         uint32_t vtc_m;
8493         uint32_t vtc_v;
8494         int i;
8495         int size;
8496
8497         if (inner) {
8498                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8499                                          inner_headers);
8500                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8501         } else {
8502                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8503                                          outer_headers);
8504                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8505         }
8506         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8507         if (!ipv6_v)
8508                 return;
8509         if (!ipv6_m)
8510                 ipv6_m = &nic_mask;
8511         size = sizeof(ipv6_m->hdr.dst_addr);
8512         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8513                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8514         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8515                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8516         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8517         for (i = 0; i < size; ++i)
8518                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8519         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8520                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8521         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8522                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8523         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8524         for (i = 0; i < size; ++i)
8525                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8526         /* TOS. */
8527         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8528         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8529         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8530         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8531         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8532         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8533         /* Label. */
8534         if (inner) {
8535                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8536                          vtc_m);
8537                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8538                          vtc_v);
8539         } else {
8540                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8541                          vtc_m);
8542                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8543                          vtc_v);
8544         }
8545         /* Protocol. */
8546         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8547                  ipv6_m->hdr.proto);
8548         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8549                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8550         /* Hop limit. */
8551         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8552                  ipv6_m->hdr.hop_limits);
8553         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8554                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8555         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8556                  !!(ipv6_m->has_frag_ext));
8557         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8558                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8559 }
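
/*
 * vtc_flow decomposition used above, on the host-endian value: bits
 * 28-31 hold the version, bits 20-27 the traffic class (DSCP in bits
 * 22-27, ECN in bits 20-21) and bits 0-19 the flow label. MLX5_SET()
 * truncates each shifted value to the PRM field width. Sketch:
 *
 * @code
 * uint32_t vtc = rte_be_to_cpu_32(RTE_BE32(0x6abc1234));
 * uint32_t ecn = (vtc >> 20) & 0x3;
 * uint32_t dscp = (vtc >> 22) & 0x3f;
 * uint32_t label = vtc & 0xfffff;
 * @endcode
 */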
8560
8561 /**
8562  * Add IPV6 fragment extension item to matcher and to the value.
8563  *
8564  * @param[in, out] matcher
8565  *   Flow matcher.
8566  * @param[in, out] key
8567  *   Flow matcher value.
8568  * @param[in] item
8569  *   Flow pattern to translate.
8570  * @param[in] inner
8571  *   Item is inner pattern.
8572  */
8573 static void
8574 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8575                                      const struct rte_flow_item *item,
8576                                      int inner)
8577 {
8578         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8579         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8580         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8581                 .hdr = {
8582                         .next_header = 0xff,
8583                         .frag_data = RTE_BE16(0xffff),
8584                 },
8585         };
8586         void *headers_m;
8587         void *headers_v;
8588
8589         if (inner) {
8590                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8591                                          inner_headers);
8592                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8593         } else {
8594                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8595                                          outer_headers);
8596                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8597         }
8598         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8599         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8600         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8601         if (!ipv6_frag_ext_v)
8602                 return;
8603         if (!ipv6_frag_ext_m)
8604                 ipv6_frag_ext_m = &nic_mask;
8605         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8606                  ipv6_frag_ext_m->hdr.next_header);
8607         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8608                  ipv6_frag_ext_v->hdr.next_header &
8609                  ipv6_frag_ext_m->hdr.next_header);
8610 }
8611
8612 /**
8613  * Add TCP item to matcher and to the value.
8614  *
8615  * @param[in, out] matcher
8616  *   Flow matcher.
8617  * @param[in, out] key
8618  *   Flow matcher value.
8619  * @param[in] item
8620  *   Flow pattern to translate.
8621  * @param[in] inner
8622  *   Item is inner pattern.
8623  */
8624 static void
8625 flow_dv_translate_item_tcp(void *matcher, void *key,
8626                            const struct rte_flow_item *item,
8627                            int inner)
8628 {
8629         const struct rte_flow_item_tcp *tcp_m = item->mask;
8630         const struct rte_flow_item_tcp *tcp_v = item->spec;
8631         void *headers_m;
8632         void *headers_v;
8633
8634         if (inner) {
8635                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8636                                          inner_headers);
8637                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8638         } else {
8639                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8640                                          outer_headers);
8641                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8642         }
8643         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8644         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8645         if (!tcp_v)
8646                 return;
8647         if (!tcp_m)
8648                 tcp_m = &rte_flow_item_tcp_mask;
8649         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8650                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8651         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8652                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8653         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8654                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8655         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8656                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8657         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8658                  tcp_m->hdr.tcp_flags);
8659         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8660                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8661 }
8662
8663 /**
8664  * Add UDP item to matcher and to the value.
8665  *
8666  * @param[in, out] matcher
8667  *   Flow matcher.
8668  * @param[in, out] key
8669  *   Flow matcher value.
8670  * @param[in] item
8671  *   Flow pattern to translate.
8672  * @param[in] inner
8673  *   Item is inner pattern.
8674  */
8675 static void
8676 flow_dv_translate_item_udp(void *matcher, void *key,
8677                            const struct rte_flow_item *item,
8678                            int inner)
8679 {
8680         const struct rte_flow_item_udp *udp_m = item->mask;
8681         const struct rte_flow_item_udp *udp_v = item->spec;
8682         void *headers_m;
8683         void *headers_v;
8684
8685         if (inner) {
8686                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8687                                          inner_headers);
8688                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8689         } else {
8690                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8691                                          outer_headers);
8692                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8693         }
8694         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8695         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8696         if (!udp_v)
8697                 return;
8698         if (!udp_m)
8699                 udp_m = &rte_flow_item_udp_mask;
8700         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8701                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8702         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8703                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8704         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8705                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8706         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8707                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8708 }
8709
8710 /**
8711  * Add GRE optional Key item to matcher and to the value.
8712  *
8713  * @param[in, out] matcher
8714  *   Flow matcher.
8715  * @param[in, out] key
8716  *   Flow matcher value.
8717  * @param[in] item
8718  *   Flow pattern to translate.
8721  */
8722 static void
8723 flow_dv_translate_item_gre_key(void *matcher, void *key,
8724                                    const struct rte_flow_item *item)
8725 {
8726         const rte_be32_t *key_m = item->mask;
8727         const rte_be32_t *key_v = item->spec;
8728         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8729         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8730         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8731
8732         /* GRE K bit must be on and should already be validated */
8733         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8734         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8735         if (!key_v)
8736                 return;
8737         if (!key_m)
8738                 key_m = &gre_key_default_mask;
8739         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8740                  rte_be_to_cpu_32(*key_m) >> 8);
8741         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8742                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8743         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8744                  rte_be_to_cpu_32(*key_m) & 0xFF);
8745         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8746                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8747 }
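
/*
 * The 32-bit GRE key is split as above: the upper 24 bits go to
 * gre_key_h and the low 8 bits to gre_key_l. For example:
 *
 * @code
 * rte_be32_t gkey = RTE_BE32(0x00a1b2c3);
 * uint32_t high = rte_be_to_cpu_32(gkey) >> 8;
 * uint32_t low = rte_be_to_cpu_32(gkey) & 0xFF;
 * @endcode
 *
 * yielding high == 0x00a1b2 and low == 0xc3.
 */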
8748
8749 /**
8750  * Add GRE item to matcher and to the value.
8751  *
8752  * @param[in, out] matcher
8753  *   Flow matcher.
8754  * @param[in, out] key
8755  *   Flow matcher value.
8756  * @param[in] item
8757  *   Flow pattern to translate.
8758  * @param[in] pattern_flags
8759  *   Accumulated pattern flags.
8760  */
8761 static void
8762 flow_dv_translate_item_gre(void *matcher, void *key,
8763                            const struct rte_flow_item *item,
8764                            uint64_t pattern_flags)
8765 {
8766         static const struct rte_flow_item_gre empty_gre = {0,};
8767         const struct rte_flow_item_gre *gre_m = item->mask;
8768         const struct rte_flow_item_gre *gre_v = item->spec;
8769         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
8770         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8771         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8772         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8773         struct {
8774                 union {
8775                         __extension__
8776                         struct {
8777                                 uint16_t version:3;
8778                                 uint16_t rsvd0:9;
8779                                 uint16_t s_present:1;
8780                                 uint16_t k_present:1;
8781                                 uint16_t rsvd_bit1:1;
8782                                 uint16_t c_present:1;
8783                         };
8784                         uint16_t value;
8785                 };
8786         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8787         uint16_t protocol_m, protocol_v;
8788
8789         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8790         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8791         if (!gre_v) {
8792                 gre_v = &empty_gre;
8793                 gre_m = &empty_gre;
8794         } else {
8795                 if (!gre_m)
8796                         gre_m = &rte_flow_item_gre_mask;
8797         }
8798         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8799         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8800         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8801                  gre_crks_rsvd0_ver_m.c_present);
8802         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8803                  gre_crks_rsvd0_ver_v.c_present &
8804                  gre_crks_rsvd0_ver_m.c_present);
8805         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8806                  gre_crks_rsvd0_ver_m.k_present);
8807         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8808                  gre_crks_rsvd0_ver_v.k_present &
8809                  gre_crks_rsvd0_ver_m.k_present);
8810         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8811                  gre_crks_rsvd0_ver_m.s_present);
8812         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8813                  gre_crks_rsvd0_ver_v.s_present &
8814                  gre_crks_rsvd0_ver_m.s_present);
8815         protocol_m = rte_be_to_cpu_16(gre_m->protocol);
8816         protocol_v = rte_be_to_cpu_16(gre_v->protocol);
8817         if (!protocol_m) {
8818                 /* Force the next protocol to prevent matcher duplication. */
8819                 protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
8820                 if (protocol_v)
8821                         protocol_m = 0xFFFF;
8822         }
8823         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
8824         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8825                  protocol_m & protocol_v);
8826 }
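
/*
 * Sketch of the deduplication logic above: with an unmasked GRE
 * protocol and an inner Ethernet layer in the pattern,
 * mlx5_translate_tunnel_etypes() is expected to return
 * RTE_ETHER_TYPE_TEB, so the protocol match is forced and patterns that
 * differ only in the inner layout do not collapse into one matcher.
 *
 * @code
 * uint64_t flags = MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_INNER_L2;
 * uint16_t proto = mlx5_translate_tunnel_etypes(flags);
 * @endcode
 */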
8827
8828 /**
8829  * Add NVGRE item to matcher and to the value.
8830  *
8831  * @param[in, out] matcher
8832  *   Flow matcher.
8833  * @param[in, out] key
8834  *   Flow matcher value.
8835  * @param[in] item
8836  *   Flow pattern to translate.
8837  * @param[in] pattern_flags
8838  *   Accumulated pattern flags.
8839  */
8840 static void
8841 flow_dv_translate_item_nvgre(void *matcher, void *key,
8842                              const struct rte_flow_item *item,
8843                              uint64_t pattern_flags)
8844 {
8845         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8846         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8847         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8848         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8849         const char *tni_flow_id_m;
8850         const char *tni_flow_id_v;
8851         char *gre_key_m;
8852         char *gre_key_v;
8853         int size;
8854         int i;
8855
8856         /* For NVGRE, GRE header fields must be set with defined values. */
8857         const struct rte_flow_item_gre gre_spec = {
8858                 .c_rsvd0_ver = RTE_BE16(0x2000),
8859                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8860         };
8861         const struct rte_flow_item_gre gre_mask = {
8862                 .c_rsvd0_ver = RTE_BE16(0xB000),
8863                 .protocol = RTE_BE16(UINT16_MAX),
8864         };
8865         const struct rte_flow_item gre_item = {
8866                 .spec = &gre_spec,
8867                 .mask = &gre_mask,
8868                 .last = NULL,
8869         };
8870         flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags);
8871         if (!nvgre_v)
8872                 return;
8873         if (!nvgre_m)
8874                 nvgre_m = &rte_flow_item_nvgre_mask;
8875         tni_flow_id_m = (const char *)nvgre_m->tni;
8876         tni_flow_id_v = (const char *)nvgre_v->tni;
8877         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8878         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8879         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8880         memcpy(gre_key_m, tni_flow_id_m, size);
8881         for (i = 0; i < size; ++i)
8882                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8883 }
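
/*
 * NVGRE is translated as GRE with fixed header bits: the spec above
 * sets only the K bit (0x2000) and the TEB protocol, while the mask
 * covers the C, S and K bits (0xb000). The 24-bit TNI plus the 8-bit
 * flow_id are then copied as one 32-bit unit over the GRE key match.
 * A function-scope, compile-time sketch of that size assumption:
 *
 * @code
 * RTE_BUILD_BUG_ON(sizeof(((struct rte_flow_item_nvgre *)0)->tni) +
 *                  sizeof(((struct rte_flow_item_nvgre *)0)->flow_id)
 *                  != 4);
 * @endcode
 */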
8884
8885 /**
8886  * Add VXLAN item to matcher and to the value.
8887  *
8888  * @param[in] dev
8889  *   Pointer to the Ethernet device structure.
8890  * @param[in] attr
8891  *   Flow rule attributes.
8892  * @param[in, out] matcher
8893  *   Flow matcher.
8894  * @param[in, out] key
8895  *   Flow matcher value.
8896  * @param[in] item
8897  *   Flow pattern to translate.
8898  * @param[in] inner
8899  *   Item is inner pattern.
8900  */
8901 static void
8902 flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
8903                              const struct rte_flow_attr *attr,
8904                              void *matcher, void *key,
8905                              const struct rte_flow_item *item,
8906                              int inner)
8907 {
8908         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8909         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8910         void *headers_m;
8911         void *headers_v;
8912         void *misc5_m;
8913         void *misc5_v;
8914         uint32_t *tunnel_header_v;
8915         uint32_t *tunnel_header_m;
8916         uint16_t dport;
8917         struct mlx5_priv *priv = dev->data->dev_private;
8918         const struct rte_flow_item_vxlan nic_mask = {
8919                 .vni = "\xff\xff\xff",
8920                 .rsvd1 = 0xff,
8921         };
8922
8923         if (inner) {
8924                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8925                                          inner_headers);
8926                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8927         } else {
8928                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8929                                          outer_headers);
8930                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8931         }
8932         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8933                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8934         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8935                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8936                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8937         }
8938         dport = MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport);
8939         if (!vxlan_v)
8940                 return;
8941         if (!vxlan_m) {
8942                 if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
8943                     (attr->group && !priv->sh->misc5_cap))
8944                         vxlan_m = &rte_flow_item_vxlan_mask;
8945                 else
8946                         vxlan_m = &nic_mask;
8947         }
8948         if ((priv->sh->steering_format_version ==
8949             MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
8950             dport != MLX5_UDP_PORT_VXLAN) ||
8951             (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
8952             ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
8953                 void *misc_m;
8954                 void *misc_v;
8955                 char *vni_m;
8956                 char *vni_v;
8957                 int size;
8958                 int i;
8959                 misc_m = MLX5_ADDR_OF(fte_match_param,
8960                                       matcher, misc_parameters);
8961                 misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8962                 size = sizeof(vxlan_m->vni);
8963                 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8964                 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8965                 memcpy(vni_m, vxlan_m->vni, size);
8966                 for (i = 0; i < size; ++i)
8967                         vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8968                 return;
8969         }
8970         misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
8971         misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
8972         tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8973                                                    misc5_v,
8974                                                    tunnel_header_1);
8975         tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8976                                                    misc5_m,
8977                                                    tunnel_header_1);
8978         *tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
8979                            (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
8980                            (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
8981         if (*tunnel_header_v)
8982                 *tunnel_header_m = vxlan_m->vni[0] |
8983                         vxlan_m->vni[1] << 8 |
8984                         vxlan_m->vni[2] << 16;
8985         else
8986                 *tunnel_header_m = 0x0;
8987         *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
8988         if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
8989                 *tunnel_header_m |= vxlan_m->rsvd1 << 24;
8990 }
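
/*
 * Sketch of the misc5 packing above: the three VNI bytes occupy the low
 * 24 bits of tunnel_header_1 and rsvd1 the top byte. Illustrative
 * values:
 *
 * @code
 * uint8_t vni[3] = { 0x12, 0x34, 0x56 };
 * uint8_t rsvd1 = 0;
 * uint32_t th = vni[0] | vni[1] << 8 | vni[2] << 16 | rsvd1 << 24;
 * @endcode
 *
 * giving th == 0x563412 for this VNI.
 */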
8991
8992 /**
8993  * Add VXLAN-GPE item to matcher and to the value.
8994  *
8995  * @param[in, out] matcher
8996  *   Flow matcher.
8997  * @param[in, out] key
8998  *   Flow matcher value.
8999  * @param[in] item
9000  *   Flow pattern to translate.
9001  * @param[in] pattern_flags
9002  *   Accumulated pattern flags.
9003  */
9005 static void
9006 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
9007                                  const struct rte_flow_item *item,
9008                                  const uint64_t pattern_flags)
9009 {
9010         static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
9011         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
9012         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
9013         /* The item was validated to be on the outer side */
9014         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9015         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9016         void *misc_m =
9017                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
9018         void *misc_v =
9019                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9020         char *vni_m =
9021                 MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
9022         char *vni_v =
9023                 MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
9024         int i, size = sizeof(vxlan_m->vni);
9025         uint8_t flags_m = 0xff;
9026         uint8_t flags_v = 0xc;
9027         uint8_t m_protocol, v_protocol;
9028
9029         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9030                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9031                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9032                          MLX5_UDP_PORT_VXLAN_GPE);
9033         }
9034         if (!vxlan_v) {
9035                 vxlan_v = &dummy_vxlan_gpe_hdr;
9036                 vxlan_m = &dummy_vxlan_gpe_hdr;
9037         } else {
9038                 if (!vxlan_m)
9039                         vxlan_m = &rte_flow_item_vxlan_gpe_mask;
9040         }
9041         memcpy(vni_m, vxlan_m->vni, size);
9042         for (i = 0; i < size; ++i)
9043                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
9044         if (vxlan_m->flags) {
9045                 flags_m = vxlan_m->flags;
9046                 flags_v = vxlan_v->flags;
9047         }
9048         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
9049         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
9050         m_protocol = vxlan_m->protocol;
9051         v_protocol = vxlan_v->protocol;
9052         if (!m_protocol) {
9053                 /* Force the next protocol to ensure next-header parsing. */
9054                 if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
9055                         v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
9056                 else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
9057                         v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
9058                 else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
9059                         v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
9060                 if (v_protocol)
9061                         m_protocol = 0xFF;
9062         }
9063         MLX5_SET(fte_match_set_misc3, misc_m,
9064                  outer_vxlan_gpe_next_protocol, m_protocol);
9065         MLX5_SET(fte_match_set_misc3, misc_v,
9066                  outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
9067 }
9068
9069 /**
9070  * Add Geneve item to matcher and to the value.
9071  *
9072  * @param[in, out] matcher
9073  *   Flow matcher.
9074  * @param[in, out] key
9075  *   Flow matcher value.
9076  * @param[in] item
9077  *   Flow pattern to translate.
9078  * @param[in] pattern_flags
9079  *   Accumulated pattern flags.
9080  */
9082 static void
9083 flow_dv_translate_item_geneve(void *matcher, void *key,
9084                               const struct rte_flow_item *item,
9085                               uint64_t pattern_flags)
9086 {
9087         static const struct rte_flow_item_geneve empty_geneve = {0,};
9088         const struct rte_flow_item_geneve *geneve_m = item->mask;
9089         const struct rte_flow_item_geneve *geneve_v = item->spec;
9090         /* GENEVE flow item validation allows a single tunnel item. */
9091         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9092         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9093         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9094         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9095         uint16_t gbhdr_m;
9096         uint16_t gbhdr_v;
9097         char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
9098         char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
9099         size_t size = sizeof(geneve_m->vni), i;
9100         uint16_t protocol_m, protocol_v;
9101
9102         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9103                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9104                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9105                          MLX5_UDP_PORT_GENEVE);
9106         }
9107         if (!geneve_v) {
9108                 geneve_v = &empty_geneve;
9109                 geneve_m = &empty_geneve;
9110         } else {
9111                 if (!geneve_m)
9112                         geneve_m = &rte_flow_item_geneve_mask;
9113         }
9114         memcpy(vni_m, geneve_m->vni, size);
9115         for (i = 0; i < size; ++i)
9116                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
9117         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
9118         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
9119         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
9120                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9121         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
9122                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9123         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9124                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9125         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9126                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
9127                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9128         protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
9129         protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
9130         if (!protocol_m) {
9131                 /* Force the next protocol to prevent matcher duplication. */
9132                 protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
9133                 if (protocol_v)
9134                         protocol_m = 0xFFFF;
9135         }
9136         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
9137         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
9138                  protocol_m & protocol_v);
9139 }
9140
9141 /**
9142  * Create Geneve TLV option resource.
9143  *
9144  * @param[in, out] dev
9145  *   Pointer to rte_eth_dev structure.
9146  * @param[in] item
9147  *   Flow pattern to translate.
9150  * @param[out] error
9151  *   Pointer to the error structure.
9152  *
9153  * @return
9154  *   0 on success, a negative errno value otherwise and rte_errno is set.
9155  */
9157 int
9158 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
9159                                              const struct rte_flow_item *item,
9160                                              struct rte_flow_error *error)
9161 {
9162         struct mlx5_priv *priv = dev->data->dev_private;
9163         struct mlx5_dev_ctx_shared *sh = priv->sh;
9164         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
9165                         sh->geneve_tlv_option_resource;
9166         struct mlx5_devx_obj *obj;
9167         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9168         int ret = 0;
9169
9170         if (!geneve_opt_v)
9171                 return -1;
9172         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
9173         if (geneve_opt_resource != NULL) {
9174                 if (geneve_opt_resource->option_class ==
9175                         geneve_opt_v->option_class &&
9176                         geneve_opt_resource->option_type ==
9177                         geneve_opt_v->option_type &&
9178                         geneve_opt_resource->length ==
9179                         geneve_opt_v->option_len) {
9180                         /* We already have GENEVE TLV option obj allocated. */
9181                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
9182                                            __ATOMIC_RELAXED);
9183                 } else {
9184                         ret = rte_flow_error_set(error, ENOMEM,
9185                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9186                                 "Only one GENEVE TLV option supported");
9187                         goto exit;
9188                 }
9189         } else {
9190                 /* Create a GENEVE TLV object and resource. */
9191                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->cdev->ctx,
9192                                 geneve_opt_v->option_class,
9193                                 geneve_opt_v->option_type,
9194                                 geneve_opt_v->option_len);
9195                 if (!obj) {
9196                         ret = rte_flow_error_set(error, ENODATA,
9197                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9198                                 "Failed to create GENEVE TLV Devx object");
9199                         goto exit;
9200                 }
9201                 sh->geneve_tlv_option_resource =
9202                                 mlx5_malloc(MLX5_MEM_ZERO,
9203                                                 sizeof(*geneve_opt_resource),
9204                                                 0, SOCKET_ID_ANY);
9205                 if (!sh->geneve_tlv_option_resource) {
9206                         claim_zero(mlx5_devx_cmd_destroy(obj));
9207                         ret = rte_flow_error_set(error, ENOMEM,
9208                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9209                                 "GENEVE TLV object memory allocation failed");
9210                         goto exit;
9211                 }
9212                 geneve_opt_resource = sh->geneve_tlv_option_resource;
9213                 geneve_opt_resource->obj = obj;
9214                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
9215                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
9216                 geneve_opt_resource->length = geneve_opt_v->option_len;
9217                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
9218                                 __ATOMIC_RELAXED);
9219         }
9220 exit:
9221         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
9222         return ret;
9223 }
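
/*
 * Only one GENEVE TLV option object exists per shared context;
 * registering again with the same class/type/length only increments the
 * refcount, while any other combination fails. Illustrative item with
 * hypothetical option values (ret, err and dev are assumed locals):
 *
 * @code
 * struct rte_flow_item_geneve_opt opt = {
 *         .option_class = RTE_BE16(0x0102),
 *         .option_type = 0x33,
 *         .option_len = 1,
 * };
 * struct rte_flow_item item = {
 *         .type = RTE_FLOW_ITEM_TYPE_GENEVE_OPT,
 *         .spec = &opt,
 * };
 *
 * ret = flow_dev_geneve_tlv_option_resource_register(dev, &item, &err);
 * @endcode
 */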
9224
9225 /**
9226  * Add Geneve TLV option item to matcher.
9227  *
9228  * @param[in, out] dev
9229  *   Pointer to rte_eth_dev structure.
9230  * @param[in, out] matcher
9231  *   Flow matcher.
9232  * @param[in, out] key
9233  *   Flow matcher value.
9234  * @param[in] item
9235  *   Flow pattern to translate.
9236  * @param[out] error
9237  *   Pointer to error structure.
9238  */
9239 static int
9240 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9241                                   void *key, const struct rte_flow_item *item,
9242                                   struct rte_flow_error *error)
9243 {
9244         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9245         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9246         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9247         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9248         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9249                         misc_parameters_3);
9250         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9251         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9252         int ret = 0;
9253
9254         if (!geneve_opt_v)
9255                 return -1;
9256         if (!geneve_opt_m)
9257                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9258         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9259                                                            error);
9260         if (ret) {
9261                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9262                 return ret;
9263         }
9264         /*
9265          * Set the option length in GENEVE header if not requested.
9266          * The GENEVE TLV option length is expressed by the option length field
9267          * in the GENEVE header.
9268          * If the option length was not requested but the GENEVE TLV option item
9269          * is present, we set the option length field implicitly.
9270          */
9271         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9272                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9273                          MLX5_GENEVE_OPTLEN_MASK);
9274                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9275                          geneve_opt_v->option_len + 1);
9276         }
9277         MLX5_SET(fte_match_set_misc, misc_m, geneve_tlv_option_0_exist, 1);
9278         MLX5_SET(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist, 1);
9279         /* Set the data. */
9280         if (geneve_opt_v->data) {
9281                 memcpy(&opt_data_key, geneve_opt_v->data,
9282                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9283                                 sizeof(opt_data_key)));
9284                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9285                                 sizeof(opt_data_key));
9286                 memcpy(&opt_data_mask, geneve_opt_m->data,
9287                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9288                                 sizeof(opt_data_mask)));
9289                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9290                                 sizeof(opt_data_mask));
9291                 MLX5_SET(fte_match_set_misc3, misc3_m,
9292                                 geneve_tlv_option_0_data,
9293                                 rte_be_to_cpu_32(opt_data_mask));
9294                 MLX5_SET(fte_match_set_misc3, misc3_v,
9295                                 geneve_tlv_option_0_data,
9296                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9297         }
9298         return ret;
9299 }
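
/*
 * Usage sketch (illustrative only, not part of the driver): a pattern
 * item matching one GENEVE TLV option that carries a single dword of
 * data. The class/type/data values below are arbitrary examples.
 */
#if 0
	static uint32_t opt_data[1] = { RTE_BE32(0x12345678) };
	struct rte_flow_item_geneve_opt opt_spec = {
		.option_class = RTE_BE16(0x0102),
		.option_type = 0x42,
		.option_len = 1,	/* Data length in 32-bit words. */
		.data = opt_data,
	};
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_GENEVE_OPT,
		.spec = &opt_spec,
		.mask = &rte_flow_item_geneve_opt_mask,
	};
#endif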
9300
9301 /**
9302  * Add MPLS item to matcher and to the value.
9303  *
9304  * @param[in, out] matcher
9305  *   Flow matcher.
9306  * @param[in, out] key
9307  *   Flow matcher value.
9308  * @param[in] item
9309  *   Flow pattern to translate.
9310  * @param[in] prev_layer
9311  *   The protocol layer indicated in previous item.
9312  * @param[in] inner
9313  *   Item is inner pattern.
9314  */
9315 static void
9316 flow_dv_translate_item_mpls(void *matcher, void *key,
9317                             const struct rte_flow_item *item,
9318                             uint64_t prev_layer,
9319                             int inner)
9320 {
9321         const uint32_t *in_mpls_m = item->mask;
9322         const uint32_t *in_mpls_v = item->spec;
9323         uint32_t *out_mpls_m = NULL;
9324         uint32_t *out_mpls_v = NULL;
9325         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9326         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9327         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9328                                      misc_parameters_2);
9329         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9330         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9331         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9332
9333         switch (prev_layer) {
9334         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9335                 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9336                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
9337                                  0xffff);
9338                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9339                                  MLX5_UDP_PORT_MPLS);
9340                 }
9341                 break;
9342         case MLX5_FLOW_LAYER_GRE:
9343                 /* Fall-through. */
9344         case MLX5_FLOW_LAYER_GRE_KEY:
9345                 if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
9346                         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
9347                                  0xffff);
9348                         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9349                                  RTE_ETHER_TYPE_MPLS);
9350                 }
9351                 break;
9352         default:
9353                 break;
9354         }
9355         if (!in_mpls_v)
9356                 return;
9357         if (!in_mpls_m)
9358                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9359         switch (prev_layer) {
9360         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9361                 out_mpls_m =
9362                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9363                                                  outer_first_mpls_over_udp);
9364                 out_mpls_v =
9365                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9366                                                  outer_first_mpls_over_udp);
9367                 break;
9368         case MLX5_FLOW_LAYER_GRE:
9369                 out_mpls_m =
9370                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9371                                                  outer_first_mpls_over_gre);
9372                 out_mpls_v =
9373                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9374                                                  outer_first_mpls_over_gre);
9375                 break;
9376         default:
9377                 /* Inner MPLS not over GRE is not supported. */
9378                 if (!inner) {
9379                         out_mpls_m =
9380                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9381                                                          misc2_m,
9382                                                          outer_first_mpls);
9383                         out_mpls_v =
9384                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9385                                                          misc2_v,
9386                                                          outer_first_mpls);
9387                 }
9388                 break;
9389         }
9390         if (out_mpls_m && out_mpls_v) {
9391                 *out_mpls_m = *in_mpls_m;
9392                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9393         }
9394 }
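
/*
 * Usage sketch (illustrative only, not part of the driver): an MPLS
 * item matching label 100. The three label_tc_s bytes carry
 * label(20) | TC(3) | S(1); the translation above copies the whole
 * 32-bit MPLS word into the proper misc2 match field.
 */
#if 0
	struct rte_flow_item_mpls mpls_spec = {
		.label_tc_s = { 0x00, 0x06, 0x40 },	/* Label 100. */
		.ttl = 0,
	};
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_MPLS,
		.spec = &mpls_spec,
		.mask = &rte_flow_item_mpls_mask,	/* Label bits only. */
	};
#endif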
9395
9396 /**
9397  * Add metadata register item to matcher
9398  *
9399  * @param[in, out] matcher
9400  *   Flow matcher.
9401  * @param[in, out] key
9402  *   Flow matcher value.
9403  * @param[in] reg_type
9404  *   Type of device metadata register.
9405  * @param[in] data
9406  *   Register value.
9407  * @param[in] mask
9408  *   Register mask.
9409  */
9410 static void
9411 flow_dv_match_meta_reg(void *matcher, void *key,
9412                        enum modify_reg reg_type,
9413                        uint32_t data, uint32_t mask)
9414 {
9415         void *misc2_m =
9416                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9417         void *misc2_v =
9418                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9419         uint32_t temp;
9420
9421         data &= mask;
9422         switch (reg_type) {
9423         case REG_A:
9424                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9425                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9426                 break;
9427         case REG_B:
9428                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9429                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9430                 break;
9431         case REG_C_0:
9432                 /*
9433                  * The metadata register C0 field might be divided into
9434                  * source vport index and META item value, so we should set
9435                  * this field according to the specified mask, not as a whole.
9436                  */
9437                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9438                 temp |= mask;
9439                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9440                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9441                 temp &= ~mask;
9442                 temp |= data;
9443                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9444                 break;
9445         case REG_C_1:
9446                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9447                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9448                 break;
9449         case REG_C_2:
9450                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9451                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9452                 break;
9453         case REG_C_3:
9454                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9455                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9456                 break;
9457         case REG_C_4:
9458                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9459                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9460                 break;
9461         case REG_C_5:
9462                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9463                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9464                 break;
9465         case REG_C_6:
9466                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9467                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9468                 break;
9469         case REG_C_7:
9470                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9471                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9472                 break;
9473         default:
9474                 MLX5_ASSERT(false);
9475                 break;
9476         }
9477 }
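
/*
 * Worked example for the REG_C_0 merge above (values are illustrative):
 * if the lower half of C0 already matches the source vport with mask
 * 0x0000ffff and a META item arrives with mask 0xffff0000, the merged
 * matcher mask becomes 0xffffffff, while the META data only overwrites
 * the upper 16 bits of the value and leaves the vport bits intact.
 */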
9478
9479 /**
9480  * Add MARK item to matcher
9481  *
9482  * @param[in] dev
9483  *   The device to configure through.
9484  * @param[in, out] matcher
9485  *   Flow matcher.
9486  * @param[in, out] key
9487  *   Flow matcher value.
9488  * @param[in] item
9489  *   Flow pattern to translate.
9490  */
9491 static void
9492 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9493                             void *matcher, void *key,
9494                             const struct rte_flow_item *item)
9495 {
9496         struct mlx5_priv *priv = dev->data->dev_private;
9497         const struct rte_flow_item_mark *mark;
9498         uint32_t value;
9499         uint32_t mask;
9500
9501         mark = item->mask ? (const void *)item->mask :
9502                             &rte_flow_item_mark_mask;
9503         mask = mark->id & priv->sh->dv_mark_mask;
9504         mark = (const void *)item->spec;
9505         MLX5_ASSERT(mark);
9506         value = mark->id & priv->sh->dv_mark_mask & mask;
9507         if (mask) {
9508                 enum modify_reg reg;
9509
9510                 /* Get the metadata register index for the mark. */
9511                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9512                 MLX5_ASSERT(reg > 0);
9513                 if (reg == REG_C_0) {
9514                         struct mlx5_priv *priv = dev->data->dev_private;
9515                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9516                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9517
9518                         mask &= msk_c0;
9519                         mask <<= shl_c0;
9520                         value <<= shl_c0;
9521                 }
9522                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9523         }
9524 }
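
/*
 * Worked example for the REG_C_0 case above (values are illustrative):
 * with dv_regc0_mask == 0xffff0000, rte_bsf32() yields shl_c0 == 16, so
 * a MARK id of 0x1234 is matched as 0x12340000 in the upper half of C0,
 * keeping the lower half free for the vport metadata.
 */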
9525
9526 /**
9527  * Add META item to matcher
9528  *
9529  * @param[in] dev
9530  *   The device to configure through.
9531  * @param[in, out] matcher
9532  *   Flow matcher.
9533  * @param[in, out] key
9534  *   Flow matcher value.
9535  * @param[in] attr
9536  *   Attributes of flow that includes this item.
9537  * @param[in] item
9538  *   Flow pattern to translate.
9539  */
9540 static void
9541 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9542                             void *matcher, void *key,
9543                             const struct rte_flow_attr *attr,
9544                             const struct rte_flow_item *item)
9545 {
9546         const struct rte_flow_item_meta *meta_m;
9547         const struct rte_flow_item_meta *meta_v;
9548
9549         meta_m = (const void *)item->mask;
9550         if (!meta_m)
9551                 meta_m = &rte_flow_item_meta_mask;
9552         meta_v = (const void *)item->spec;
9553         if (meta_v) {
9554                 int reg;
9555                 uint32_t value = meta_v->data;
9556                 uint32_t mask = meta_m->data;
9557
9558                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9559                 if (reg < 0)
9560                         return;
9561                 MLX5_ASSERT(reg != REG_NON);
9562                 if (reg == REG_C_0) {
9563                         struct mlx5_priv *priv = dev->data->dev_private;
9564                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9565                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9566
9567                         mask &= msk_c0;
9568                         mask <<= shl_c0;
9569                         value <<= shl_c0;
9570                 }
9571                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9572         }
9573 }
9574
9575 /**
9576  * Add vport metadata Reg C0 item to matcher
9577  *
9578  * @param[in, out] matcher
9579  *   Flow matcher.
9580  * @param[in, out] key
9581  *   Flow matcher value.
9582  * @param[in] value
9583  *   Vport metadata register C0 value to match, masked by @p mask.
9584  */
9585 static void
9586 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9587                                   uint32_t value, uint32_t mask)
9588 {
9589         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9590 }
9591
9592 /**
9593  * Add tag item to matcher
9594  *
9595  * @param[in] dev
9596  *   The device to configure through.
9597  * @param[in, out] matcher
9598  *   Flow matcher.
9599  * @param[in, out] key
9600  *   Flow matcher value.
9601  * @param[in] item
9602  *   Flow pattern to translate.
9603  */
9604 static void
9605 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9606                                 void *matcher, void *key,
9607                                 const struct rte_flow_item *item)
9608 {
9609         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9610         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9611         uint32_t mask, value;
9612
9613         MLX5_ASSERT(tag_v);
9614         value = tag_v->data;
9615         mask = tag_m ? tag_m->data : UINT32_MAX;
9616         if (tag_v->id == REG_C_0) {
9617                 struct mlx5_priv *priv = dev->data->dev_private;
9618                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9619                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9620
9621                 mask &= msk_c0;
9622                 mask <<= shl_c0;
9623                 value <<= shl_c0;
9624         }
9625         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9626 }
9627
9628 /**
9629  * Add TAG item to matcher
9630  *
9631  * @param[in] dev
9632  *   The device to configure through.
9633  * @param[in, out] matcher
9634  *   Flow matcher.
9635  * @param[in, out] key
9636  *   Flow matcher value.
9637  * @param[in] item
9638  *   Flow pattern to translate.
9639  */
9640 static void
9641 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9642                            void *matcher, void *key,
9643                            const struct rte_flow_item *item)
9644 {
9645         const struct rte_flow_item_tag *tag_v = item->spec;
9646         const struct rte_flow_item_tag *tag_m = item->mask;
9647         enum modify_reg reg;
9648
9649         MLX5_ASSERT(tag_v);
9650         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9651         /* Get the metadata register index for the tag. */
9652         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9653         MLX5_ASSERT(reg > 0);
9654         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9655 }
9656
9657 /**
9658  * Add source vport match to the specified matcher.
9659  *
9660  * @param[in, out] matcher
9661  *   Flow matcher.
9662  * @param[in, out] key
9663  *   Flow matcher value.
9664  * @param[in] port
9665  *   Source vport value to match.
9666  * @param[in] mask
9667  *   Mask to apply to the vport value.
9668  */
9669 static void
9670 flow_dv_translate_item_source_vport(void *matcher, void *key,
9671                                     int16_t port, uint16_t mask)
9672 {
9673         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9674         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9675
9676         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9677         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9678 }
9679
9680 /**
9681  * Translate port-id item to eswitch match on port-id.
9682  *
9683  * @param[in] dev
9684  *   The device to configure through.
9685  * @param[in, out] matcher
9686  *   Flow matcher.
9687  * @param[in, out] key
9688  *   Flow matcher value.
9689  * @param[in] item
9690  *   Flow pattern to translate.
9691  * @param[in] attr
9692  *   Flow attributes.
9693  *
9694  * @return
9695  *   0 on success, a negative errno value otherwise.
9696  */
9697 static int
9698 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9699                                void *key, const struct rte_flow_item *item,
9700                                const struct rte_flow_attr *attr)
9701 {
9702         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9703         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9704         struct mlx5_priv *priv;
9705         uint16_t mask, id;
9706
9707         if (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) {
9708                 flow_dv_translate_item_source_vport(matcher, key,
9709                         flow_dv_get_esw_manager_vport_id(dev), 0xffff);
9710                 return 0;
9711         }
9712         mask = pid_m ? pid_m->id : 0xffff;
9713         id = pid_v ? pid_v->id : dev->data->port_id;
9714         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9715         if (!priv)
9716                 return -rte_errno;
9717         /*
9718          * Translate to vport field or to metadata, depending on mode.
9719          * Kernel can use either misc.source_port or half of C0 metadata
9720          * register.
9721          */
9722         if (priv->vport_meta_mask) {
9723                 /*
9724                  * Provide the hint for SW steering library
9725                  * to insert the flow into ingress domain and
9726                  * save the extra vport match.
9727                  */
9728                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9729                     priv->pf_bond < 0 && attr->transfer)
9730                         flow_dv_translate_item_source_vport
9731                                 (matcher, key, priv->vport_id, mask);
9732                 /*
9733                  * We should always set the vport metadata register,
9734                  * otherwise the SW steering library can drop
9735                  * the rule if the wire vport metadata value is not zero;
9736                  * it depends on the kernel configuration.
9737                  */
9738                 flow_dv_translate_item_meta_vport(matcher, key,
9739                                                   priv->vport_meta_tag,
9740                                                   priv->vport_meta_mask);
9741         } else {
9742                 flow_dv_translate_item_source_vport(matcher, key,
9743                                                     priv->vport_id, mask);
9744         }
9745         return 0;
9746 }
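
/*
 * Usage sketch (illustrative only, not part of the driver): a transfer
 * rule item matching traffic coming from DPDK port 1. Depending on the
 * kernel configuration, the translation above turns it into either a
 * source vport match or a REG_C_0 vport metadata match.
 */
#if 0
	struct rte_flow_item_port_id pid_spec = { .id = 1 };
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
		.spec = &pid_spec,
		.mask = &rte_flow_item_port_id_mask,
	};
#endif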
9747
9748 /**
9749  * Add ICMP6 item to matcher and to the value.
9750  *
9751  * @param[in, out] matcher
9752  *   Flow matcher.
9753  * @param[in, out] key
9754  *   Flow matcher value.
9755  * @param[in] item
9756  *   Flow pattern to translate.
9757  * @param[in] inner
9758  *   Item is inner pattern.
9759  */
9760 static void
9761 flow_dv_translate_item_icmp6(void *matcher, void *key,
9762                               const struct rte_flow_item *item,
9763                               int inner)
9764 {
9765         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9766         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9767         void *headers_m;
9768         void *headers_v;
9769         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9770                                      misc_parameters_3);
9771         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9772         if (inner) {
9773                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9774                                          inner_headers);
9775                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9776         } else {
9777                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9778                                          outer_headers);
9779                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9780         }
9781         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9782         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9783         if (!icmp6_v)
9784                 return;
9785         if (!icmp6_m)
9786                 icmp6_m = &rte_flow_item_icmp6_mask;
9787         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9788         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9789                  icmp6_v->type & icmp6_m->type);
9790         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9791         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9792                  icmp6_v->code & icmp6_m->code);
9793 }
9794
9795 /**
9796  * Add ICMP item to matcher and to the value.
9797  *
9798  * @param[in, out] matcher
9799  *   Flow matcher.
9800  * @param[in, out] key
9801  *   Flow matcher value.
9802  * @param[in] item
9803  *   Flow pattern to translate.
9804  * @param[in] inner
9805  *   Item is inner pattern.
9806  */
9807 static void
9808 flow_dv_translate_item_icmp(void *matcher, void *key,
9809                             const struct rte_flow_item *item,
9810                             int inner)
9811 {
9812         const struct rte_flow_item_icmp *icmp_m = item->mask;
9813         const struct rte_flow_item_icmp *icmp_v = item->spec;
9814         uint32_t icmp_header_data_m = 0;
9815         uint32_t icmp_header_data_v = 0;
9816         void *headers_m;
9817         void *headers_v;
9818         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9819                                      misc_parameters_3);
9820         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9821         if (inner) {
9822                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9823                                          inner_headers);
9824                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9825         } else {
9826                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9827                                          outer_headers);
9828                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9829         }
9830         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9831         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9832         if (!icmp_v)
9833                 return;
9834         if (!icmp_m)
9835                 icmp_m = &rte_flow_item_icmp_mask;
9836         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9837                  icmp_m->hdr.icmp_type);
9838         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9839                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9840         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9841                  icmp_m->hdr.icmp_code);
9842         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9843                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9844         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9845         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9846         if (icmp_header_data_m) {
9847                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9848                 icmp_header_data_v |=
9849                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9850                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9851                          icmp_header_data_m);
9852                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9853                          icmp_header_data_v & icmp_header_data_m);
9854         }
9855 }
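
/*
 * Worked example (values are illustrative): for an ICMP echo item with
 * icmp_ident == 0xabcd and icmp_seq_nb == 0x0001, the 32-bit
 * icmp_header_data above is built as (ident << 16) | seq, i.e.
 * 0xabcd0001, matching the identifier/sequence dword of the header.
 */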
9856
9857 /**
9858  * Add GTP item to matcher and to the value.
9859  *
9860  * @param[in, out] matcher
9861  *   Flow matcher.
9862  * @param[in, out] key
9863  *   Flow matcher value.
9864  * @param[in] item
9865  *   Flow pattern to translate.
9866  * @param[in] inner
9867  *   Item is inner pattern.
9868  */
9869 static void
9870 flow_dv_translate_item_gtp(void *matcher, void *key,
9871                            const struct rte_flow_item *item, int inner)
9872 {
9873         const struct rte_flow_item_gtp *gtp_m = item->mask;
9874         const struct rte_flow_item_gtp *gtp_v = item->spec;
9875         void *headers_m;
9876         void *headers_v;
9877         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9878                                      misc_parameters_3);
9879         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9880         uint16_t dport = RTE_GTPU_UDP_PORT;
9881
9882         if (inner) {
9883                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9884                                          inner_headers);
9885                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9886         } else {
9887                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9888                                          outer_headers);
9889                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9890         }
9891         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9892                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9893                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9894         }
9895         if (!gtp_v)
9896                 return;
9897         if (!gtp_m)
9898                 gtp_m = &rte_flow_item_gtp_mask;
9899         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9900                  gtp_m->v_pt_rsv_flags);
9901         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9902                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9903         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9904         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9905                  gtp_v->msg_type & gtp_m->msg_type);
9906         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9907                  rte_be_to_cpu_32(gtp_m->teid));
9908         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9909                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9910 }
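
/*
 * Usage sketch (illustrative only, not part of the driver): a GTP item
 * matching TEID 0x1234. The UDP destination port defaults to
 * RTE_GTPU_UDP_PORT (2152) when the pattern does not set one.
 */
#if 0
	struct rte_flow_item_gtp gtp_spec = {
		.teid = RTE_BE32(0x1234),
	};
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_GTP,
		.spec = &gtp_spec,
		.mask = &rte_flow_item_gtp_mask,	/* TEID-only default mask. */
	};
#endif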
9911
9912 /**
9913  * Add GTP PSC item to matcher.
9914  *
9915  * @param[in, out] matcher
9916  *   Flow matcher.
9917  * @param[in, out] key
9918  *   Flow matcher value.
9919  * @param[in] item
9920  *   Flow pattern to translate.
9921  */
9922 static int
9923 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9924                                const struct rte_flow_item *item)
9925 {
9926         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9927         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9928         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9929                         misc_parameters_3);
9930         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9931         union {
9932                 uint32_t w32;
9933                 struct {
9934                         uint16_t seq_num;
9935                         uint8_t npdu_num;
9936                         uint8_t next_ext_header_type;
9937                 };
9938         } dw_2;
9939         uint8_t gtp_flags;
9940
9941         /* Always match the E-flag set to one, regardless of GTP item settings. */
9942         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9943         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9944         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9945         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9946         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9947         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9948         /* Set the next extension header type. */
9949         dw_2.seq_num = 0;
9950         dw_2.npdu_num = 0;
9951         dw_2.next_ext_header_type = 0xff;
9952         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9953                  rte_cpu_to_be_32(dw_2.w32));
9954         dw_2.seq_num = 0;
9955         dw_2.npdu_num = 0;
9956         dw_2.next_ext_header_type = 0x85;
9957         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9958                  rte_cpu_to_be_32(dw_2.w32));
9959         if (gtp_psc_v) {
9960                 union {
9961                         uint32_t w32;
9962                         struct {
9963                                 uint8_t len;
9964                                 uint8_t type_flags;
9965                                 uint8_t qfi;
9966                                 uint8_t reserved;
9967                         };
9968                 } dw_0;
9969
9970                 /* Set the extension header PDU type and QoS. */
9971                 if (!gtp_psc_m)
9972                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9973                 dw_0.w32 = 0;
9974                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->hdr.type);
9975                 dw_0.qfi = gtp_psc_m->hdr.qfi;
9976                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9977                          rte_cpu_to_be_32(dw_0.w32));
9978                 dw_0.w32 = 0;
9979                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->hdr.type &
9980                                                         gtp_psc_m->hdr.type);
9981                 dw_0.qfi = gtp_psc_v->hdr.qfi & gtp_psc_m->hdr.qfi;
9982                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9983                          rte_cpu_to_be_32(dw_0.w32));
9984         }
9985         return 0;
9986 }
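
/*
 * Note on the constants above: 0x85 is the GTP-U next-extension-header
 * type of the PDU session container, so the matcher pins gtpu_dw_2 to
 * "next extension header == PDU session container" and then matches the
 * PDU type and QFI inside the extension's first dword.
 */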
9987
9988 /**
9989  * Add eCPRI item to matcher and to the value.
9990  *
9991  * @param[in] dev
9992  *   The device to configure through.
9993  * @param[in, out] matcher
9994  *   Flow matcher.
9995  * @param[in, out] key
9996  *   Flow matcher value.
9997  * @param[in] item
9998  *   Flow pattern to translate.
9999  * @param[in] last_item
10000  *   Last item flags.
10001  */
10002 static void
10003 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
10004                              void *key, const struct rte_flow_item *item,
10005                              uint64_t last_item)
10006 {
10007         struct mlx5_priv *priv = dev->data->dev_private;
10008         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
10009         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
10010         struct rte_ecpri_common_hdr common;
10011         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
10012                                      misc_parameters_4);
10013         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
10014         uint32_t *samples;
10015         void *dw_m;
10016         void *dw_v;
10017
10018         /*
10019          * In case of eCPRI over Ethernet, if EtherType is not specified,
10020          * match on eCPRI EtherType implicitly.
10021          */
10022         if (last_item & MLX5_FLOW_LAYER_OUTER_L2) {
10023                 void *hdrs_m, *hdrs_v, *l2m, *l2v;
10024
10025                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
10026                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
10027                 l2m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, ethertype);
10028                 l2v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
10029                 if (*(uint16_t *)l2m == 0 && *(uint16_t *)l2v == 0) {
10030                         *(uint16_t *)l2m = UINT16_MAX;
10031                         *(uint16_t *)l2v = RTE_BE16(RTE_ETHER_TYPE_ECPRI);
10032                 }
10033         }
10034         if (!ecpri_v)
10035                 return;
10036         if (!ecpri_m)
10037                 ecpri_m = &rte_flow_item_ecpri_mask;
10038         /*
10039          * At most four DW samples are supported in a single matcher now.
10040          * Two are used for eCPRI matching:
10041          * 1. Type: one byte, mask should be 0x00ff0000 in network order.
10042          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
10043          *    if any.
10044          */
10045         if (!ecpri_m->hdr.common.u32)
10046                 return;
10047         samples = priv->sh->ecpri_parser.ids;
10048         /* Need to take the whole DW as the mask to fill the entry. */
10049         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10050                             prog_sample_field_value_0);
10051         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10052                             prog_sample_field_value_0);
10053         /* Already big endian (network order) in the header. */
10054         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
10055         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
10056         /* Sample#0, used for matching type, offset 0. */
10057         MLX5_SET(fte_match_set_misc4, misc4_m,
10058                  prog_sample_field_id_0, samples[0]);
10059         /* It makes no sense to set the sample ID in the mask field. */
10060         MLX5_SET(fte_match_set_misc4, misc4_v,
10061                  prog_sample_field_id_0, samples[0]);
10062         /*
10063          * Check if the message body part needs to be matched.
10064          * Wildcard rules matching only the type field should be supported.
10065          */
10066         if (ecpri_m->hdr.dummy[0]) {
10067                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
10068                 switch (common.type) {
10069                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
10070                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
10071                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
10072                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10073                                             prog_sample_field_value_1);
10074                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10075                                             prog_sample_field_value_1);
10076                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
10077                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
10078                                             ecpri_m->hdr.dummy[0];
10079                         /* Sample#1, to match message body, offset 4. */
10080                         MLX5_SET(fte_match_set_misc4, misc4_m,
10081                                  prog_sample_field_id_1, samples[1]);
10082                         MLX5_SET(fte_match_set_misc4, misc4_v,
10083                                  prog_sample_field_id_1, samples[1]);
10084                         break;
10085                 default:
10086                         /* Others, do not match any sample ID. */
10087                         break;
10088                 }
10089         }
10090 }
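
/*
 * Worked example (values are illustrative): to match eCPRI IQ data
 * messages (type 0x00) with PC_ID 0x0001, sample #0 takes the first
 * header dword with mask bytes 00 ff 00 00 (the message type byte), and
 * sample #1 takes the first payload dword with mask bytes ff ff 00 00
 * and value bytes 00 01 00 00 (the PC_ID field), all in network order.
 */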
10091
10092 /**
10093  * Add connection tracking status item to matcher
10094  *
10095  * @param[in] dev
10096  *   The device to configure through.
10097  * @param[in, out] matcher
10098  *   Flow matcher.
10099  * @param[in, out] key
10100  *   Flow matcher value.
10101  * @param[in] item
10102  *   Flow pattern to translate.
10103  */
10104 static void
10105 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
10106                               void *matcher, void *key,
10107                               const struct rte_flow_item *item)
10108 {
10109         uint32_t reg_value = 0;
10110         int reg_id;
10111         /* 8 LSB: 0b11000011, the middle 4 bits are reserved. */
10112         uint32_t reg_mask = 0;
10113         const struct rte_flow_item_conntrack *spec = item->spec;
10114         const struct rte_flow_item_conntrack *mask = item->mask;
10115         uint32_t flags;
10116         struct rte_flow_error error;
10117
10118         if (!mask)
10119                 mask = &rte_flow_item_conntrack_mask;
10120         if (!spec || !mask->flags)
10121                 return;
10122         flags = spec->flags & mask->flags;
10123         /* Conflicts should have been checked during validation. */
10124         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
10125                 reg_value |= MLX5_CT_SYNDROME_VALID;
10126         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10127                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
10128         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
10129                 reg_value |= MLX5_CT_SYNDROME_INVALID;
10130         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
10131                 reg_value |= MLX5_CT_SYNDROME_TRAP;
10132         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10133                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
10134         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
10135                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
10136                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
10137                 reg_mask |= 0xc0;
10138         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10139                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
10140         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10141                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
10142         /* The REG_C_x value could be saved during startup. */
10143         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
10144         if (reg_id == REG_NON)
10145                 return;
10146         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
10147                                reg_value, reg_mask);
10148 }
10149
10150 static void
10151 flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
10152                             const struct rte_flow_item *item,
10153                             struct mlx5_flow *dev_flow, bool is_inner)
10154 {
10155         const struct rte_flow_item_flex *spec =
10156                 (const struct rte_flow_item_flex *)item->spec;
10157         int index = mlx5_flex_acquire_index(dev, spec->handle, false);
10158
10159         MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
10160         if (index < 0)
10161                 return;
10162         if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
10163                 /* Don't count both inner and outer flex items in one rule. */
10164                 if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
10165                         MLX5_ASSERT(false);
10166                 dev_flow->handle->flex_item |= RTE_BIT32(index);
10167         }
10168         mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
10169 }
10170
10171 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
10172
10173 #define HEADER_IS_ZERO(match_criteria, headers)                              \
10174         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
10175                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
10176
10177 /**
10178  * Calculate flow matcher enable bitmap.
10179  *
10180  * @param match_criteria
10181  *   Pointer to flow matcher criteria.
10182  *
10183  * @return
10184  *   Bitmap of enabled fields.
10185  */
10186 static uint8_t
10187 flow_dv_matcher_enable(uint32_t *match_criteria)
10188 {
10189         uint8_t match_criteria_enable;
10190
10191         match_criteria_enable =
10192                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
10193                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
10194         match_criteria_enable |=
10195                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
10196                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
10197         match_criteria_enable |=
10198                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
10199                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
10200         match_criteria_enable |=
10201                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
10202                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
10203         match_criteria_enable |=
10204                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
10205                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
10206         match_criteria_enable |=
10207                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
10208                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
10209         match_criteria_enable |=
10210                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10211                 MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10212         return match_criteria_enable;
10213 }
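
/*
 * Worked example (illustrative): a matcher using only the outer headers
 * and misc_parameters_2 (e.g. an L2 match plus a metadata register
 * match) yields match_criteria_enable ==
 * (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 * (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT).
 */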
10214
10215 static void
10216 __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10217 {
10218         /*
10219          * Check the flow matching criteria first, subtracting the misc5/4
10220          * length if the flow doesn't own misc5/4 parameters. In some old
10221          * rdma-core releases, misc5/4 are not supported, and matcher creation
10222          * is expected to fail without the subtraction. If misc5 is provided,
10223          * misc4 must be counted in as well, since misc5 is right after misc4.
10224          */
10225         if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10226                 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10227                         MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10228                 if (!(match_criteria & (1 <<
10229                         MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10230                         *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10231                 }
10232         }
10233 }
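
/*
 * Worked example (illustrative): a matcher touching neither misc4 nor
 * misc5 trims the buffer to MLX5_ST_SZ_BYTES(fte_match_param) minus
 * MLX5_ST_SZ_BYTES(fte_match_set_misc5) minus
 * MLX5_ST_SZ_BYTES(fte_match_set_misc4), a size that old rdma-core
 * releases without misc4/5 support can still accept.
 */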
10234
10235 static struct mlx5_list_entry *
10236 flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
10237                          struct mlx5_list_entry *entry, void *cb_ctx)
10238 {
10239         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10240         struct mlx5_flow_dv_matcher *ref = ctx->data;
10241         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10242                                                             typeof(*tbl), tbl);
10243         struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10244                                                             sizeof(*resource),
10245                                                             0, SOCKET_ID_ANY);
10246
10247         if (!resource) {
10248                 rte_flow_error_set(ctx->error, ENOMEM,
10249                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10250                                    "cannot create matcher");
10251                 return NULL;
10252         }
10253         memcpy(resource, entry, sizeof(*resource));
10254         resource->tbl = &tbl->tbl;
10255         return &resource->entry;
10256 }
10257
10258 static void
10259 flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
10260                              struct mlx5_list_entry *entry)
10261 {
10262         mlx5_free(entry);
10263 }
10264
10265 struct mlx5_list_entry *
10266 flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
10267 {
10268         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10269         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10270         struct rte_eth_dev *dev = ctx->dev;
10271         struct mlx5_flow_tbl_data_entry *tbl_data;
10272         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
10273         struct rte_flow_error *error = ctx->error;
10274         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10275         struct mlx5_flow_tbl_resource *tbl;
10276         void *domain;
10277         uint32_t idx = 0;
10278         int ret;
10279
10280         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10281         if (!tbl_data) {
10282                 rte_flow_error_set(error, ENOMEM,
10283                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10284                                    NULL,
10285                                    "cannot allocate flow table data entry");
10286                 return NULL;
10287         }
10288         tbl_data->idx = idx;
10289         tbl_data->tunnel = tt_prm->tunnel;
10290         tbl_data->group_id = tt_prm->group_id;
10291         tbl_data->external = !!tt_prm->external;
10292         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
10293         tbl_data->is_egress = !!key.is_egress;
10294         tbl_data->is_transfer = !!key.is_fdb;
10295         tbl_data->dummy = !!key.dummy;
10296         tbl_data->level = key.level;
10297         tbl_data->id = key.id;
10298         tbl = &tbl_data->tbl;
10299         if (key.dummy)
10300                 return &tbl_data->entry;
10301         if (key.is_fdb)
10302                 domain = sh->fdb_domain;
10303         else if (key.is_egress)
10304                 domain = sh->tx_domain;
10305         else
10306                 domain = sh->rx_domain;
10307         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
10308         if (ret) {
10309                 rte_flow_error_set(error, ENOMEM,
10310                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10311                                    NULL, "cannot create flow table object");
10312                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10313                 return NULL;
10314         }
10315         if (key.level != 0) {
10316                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
10317                                         (tbl->obj, &tbl_data->jump.action);
10318                 if (ret) {
10319                         rte_flow_error_set(error, ENOMEM,
10320                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10321                                            NULL,
10322                                            "cannot create flow jump action");
10323                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10324                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10325                         return NULL;
10326                 }
10327         }
10328         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
10329               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
10330               key.level, key.id);
10331         tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
10332                                               flow_dv_matcher_create_cb,
10333                                               flow_dv_matcher_match_cb,
10334                                               flow_dv_matcher_remove_cb,
10335                                               flow_dv_matcher_clone_cb,
10336                                               flow_dv_matcher_clone_free_cb);
10337         if (!tbl_data->matchers) {
10338                 rte_flow_error_set(error, ENOMEM,
10339                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10340                                    NULL,
10341                                    "cannot create tbl matcher list");
10342                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10343                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10344                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10345                 return NULL;
10346         }
10347         return &tbl_data->entry;
10348 }
10349
10350 int
10351 flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10352                      void *cb_ctx)
10353 {
10354         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10355         struct mlx5_flow_tbl_data_entry *tbl_data =
10356                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10357         union mlx5_flow_tbl_key key = { .v64 =  *(uint64_t *)(ctx->data) };
10358
10359         return tbl_data->level != key.level ||
10360                tbl_data->id != key.id ||
10361                tbl_data->dummy != key.dummy ||
10362                tbl_data->is_transfer != !!key.is_fdb ||
10363                tbl_data->is_egress != !!key.is_egress;
10364 }
10365
10366 struct mlx5_list_entry *
10367 flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10368                       void *cb_ctx)
10369 {
10370         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10371         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10372         struct mlx5_flow_tbl_data_entry *tbl_data;
10373         struct rte_flow_error *error = ctx->error;
10374         uint32_t idx = 0;
10375
10376         tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10377         if (!tbl_data) {
10378                 rte_flow_error_set(error, ENOMEM,
10379                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10380                                    NULL,
10381                                    "cannot allocate flow table data entry");
10382                 return NULL;
10383         }
10384         memcpy(tbl_data, oentry, sizeof(*tbl_data));
10385         tbl_data->idx = idx;
10386         return &tbl_data->entry;
10387 }
10388
10389 void
10390 flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10391 {
10392         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10393         struct mlx5_flow_tbl_data_entry *tbl_data =
10394                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10395
10396         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10397 }
10398
10399 /**
10400  * Get a flow table.
10401  *
10402  * @param[in, out] dev
10403  *   Pointer to rte_eth_dev structure.
10404  * @param[in] table_level
10405  *   Table level to use.
10406  * @param[in] egress
10407  *   Direction of the table.
10408  * @param[in] transfer
10409  *   E-Switch or NIC flow.
10410  * @param[in] dummy
10411  *   Dummy entry for dv API.
10412  * @param[in] table_id
10413  *   Table id to use.
10414  * @param[out] error
10415  *   Pointer to error structure.
10416  *
10417  * @return
10418  *   Returns the table resource, NULL in case of failure.
10419  */
10420 struct mlx5_flow_tbl_resource *
10421 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10422                          uint32_t table_level, uint8_t egress,
10423                          uint8_t transfer,
10424                          bool external,
10425                          const struct mlx5_flow_tunnel *tunnel,
10426                          uint32_t group_id, uint8_t dummy,
10427                          uint32_t table_id,
10428                          struct rte_flow_error *error)
10429 {
10430         struct mlx5_priv *priv = dev->data->dev_private;
10431         union mlx5_flow_tbl_key table_key = {
10432                 {
10433                         .level = table_level,
10434                         .id = table_id,
10435                         .reserved = 0,
10436                         .dummy = !!dummy,
10437                         .is_fdb = !!transfer,
10438                         .is_egress = !!egress,
10439                 }
10440         };
10441         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10442                 .tunnel = tunnel,
10443                 .group_id = group_id,
10444                 .external = external,
10445         };
10446         struct mlx5_flow_cb_ctx ctx = {
10447                 .dev = dev,
10448                 .error = error,
10449                 .data = &table_key.v64,
10450                 .data2 = &tt_prm,
10451         };
10452         struct mlx5_list_entry *entry;
10453         struct mlx5_flow_tbl_data_entry *tbl_data;
10454
10455         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10456         if (!entry) {
10457                 rte_flow_error_set(error, ENOMEM,
10458                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10459                                    "cannot get table");
10460                 return NULL;
10461         }
10462         DRV_LOG(DEBUG, "table_level %u table_id %u "
10463                 "tunnel %u group %u registered.",
10464                 table_level, table_id,
10465                 tunnel ? tunnel->tunnel_id : 0, group_id);
10466         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10467         return &tbl_data->tbl;
10468 }
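
/*
 * Usage sketch (illustrative only, not part of the driver): getting the
 * level-1 ingress NIC table with the default table id. The call
 * transparently reuses an existing entry or creates the table object on
 * first use; releasing is done via flow_dv_tbl_resource_release().
 */
#if 0
	struct rte_flow_error error;
	struct mlx5_flow_tbl_resource *tbl;

	tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL,
				       0, 0, 0, &error);
#endif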
10469
10470 void
10471 flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10472 {
10473         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10474         struct mlx5_flow_tbl_data_entry *tbl_data =
10475                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10476
10477         MLX5_ASSERT(entry && sh);
10478         if (tbl_data->jump.action)
10479                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10480         if (tbl_data->tbl.obj)
10481                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10482         if (tbl_data->tunnel_offload && tbl_data->external) {
10483                 struct mlx5_list_entry *he;
10484                 struct mlx5_hlist *tunnel_grp_hash;
10485                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10486                 union tunnel_tbl_key tunnel_key = {
10487                         .tunnel_id = tbl_data->tunnel ?
10488                                         tbl_data->tunnel->tunnel_id : 0,
10489                         .group = tbl_data->group_id
10490                 };
10491                 uint32_t table_level = tbl_data->level;
10492                 struct mlx5_flow_cb_ctx ctx = {
10493                         .data = (void *)&tunnel_key.val,
10494                 };
10495
10496                 tunnel_grp_hash = tbl_data->tunnel ?
10497                                         tbl_data->tunnel->groups :
10498                                         thub->groups;
10499                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
10500                 if (he)
10501                         mlx5_hlist_unregister(tunnel_grp_hash, he);
10502                 DRV_LOG(DEBUG,
10503                         "table_level %u id %u tunnel %u group %u released.",
10504                         table_level,
10505                         tbl_data->id,
10506                         tbl_data->tunnel ?
10507                         tbl_data->tunnel->tunnel_id : 0,
10508                         tbl_data->group_id);
10509         }
10510         mlx5_list_destroy(tbl_data->matchers);
10511         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10512 }
10513
10514 /**
10515  * Release a flow table.
10516  *
10517  * @param[in] sh
10518  *   Pointer to device shared structure.
10519  * @param[in] tbl
10520  *   Table resource to be released.
10521  *
10522  * @return
10523  *   Returns 0 if the table was released, 1 otherwise.
10524  */
10525 static int
10526 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10527                              struct mlx5_flow_tbl_resource *tbl)
10528 {
10529         struct mlx5_flow_tbl_data_entry *tbl_data =
10530                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10531
10532         if (!tbl)
10533                 return 0;
10534         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10535 }
10536
10537 int
10538 flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
10539                          struct mlx5_list_entry *entry, void *cb_ctx)
10540 {
10541         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10542         struct mlx5_flow_dv_matcher *ref = ctx->data;
10543         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10544                                                         entry);
10545
10546         return cur->crc != ref->crc ||
10547                cur->priority != ref->priority ||
10548                memcmp((const void *)cur->mask.buf,
10549                       (const void *)ref->mask.buf, ref->mask.size);
10550 }
10551
10552 struct mlx5_list_entry *
10553 flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
10554 {
10555         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10556         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10557         struct mlx5_flow_dv_matcher *ref = ctx->data;
10558         struct mlx5_flow_dv_matcher *resource;
10559         struct mlx5dv_flow_matcher_attr dv_attr = {
10560                 .type = IBV_FLOW_ATTR_NORMAL,
10561                 .match_mask = (void *)&ref->mask,
10562         };
10563         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10564                                                             typeof(*tbl), tbl);
10565         int ret;
10566
10567         resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
10568                                SOCKET_ID_ANY);
10569         if (!resource) {
10570                 rte_flow_error_set(ctx->error, ENOMEM,
10571                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10572                                    "cannot create matcher");
10573                 return NULL;
10574         }
10575         *resource = *ref;
10576         dv_attr.match_criteria_enable =
10577                 flow_dv_matcher_enable(resource->mask.buf);
10578         __flow_dv_adjust_buf_size(&ref->mask.size,
10579                                   dv_attr.match_criteria_enable);
10580         dv_attr.priority = ref->priority;
10581         if (tbl->is_egress)
10582                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10583         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
10584                                                tbl->tbl.obj,
10585                                                &resource->matcher_object);
10586         if (ret) {
10587                 mlx5_free(resource);
10588                 rte_flow_error_set(ctx->error, ENOMEM,
10589                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10590                                    "cannot create matcher");
10591                 return NULL;
10592         }
10593         return &resource->entry;
10594 }
10595
10596 /**
10597  * Register the flow matcher.
10598  *
10599  * @param[in, out] dev
10600  *   Pointer to rte_eth_dev structure.
10601  * @param[in, out] ref
10602  *   Pointer to the flow matcher reference.
10603  * @param[in, out] key
10604  *   Pointer to the flow table key.
10605  * @param[in, out] dev_flow
10606  *   Pointer to the dev_flow.
10607  * @param[out] error
10608  *   Pointer to the error structure.
10609  *
10610  * @return
10611  *   0 on success, a negative errno value otherwise and rte_errno is set.
10612  */
10613 static int
10614 flow_dv_matcher_register(struct rte_eth_dev *dev,
10615                          struct mlx5_flow_dv_matcher *ref,
10616                          union mlx5_flow_tbl_key *key,
10617                          struct mlx5_flow *dev_flow,
10618                          const struct mlx5_flow_tunnel *tunnel,
10619                          uint32_t group_id,
10620                          struct rte_flow_error *error)
10621 {
10622         struct mlx5_list_entry *entry;
10623         struct mlx5_flow_dv_matcher *resource;
10624         struct mlx5_flow_tbl_resource *tbl;
10625         struct mlx5_flow_tbl_data_entry *tbl_data;
10626         struct mlx5_flow_cb_ctx ctx = {
10627                 .error = error,
10628                 .data = ref,
10629         };
10630         /*
10631          * The tunnel offload API requires this registration for cases
10632          * when a tunnel match rule is inserted before the tunnel set rule.
10633          */
10634         tbl = flow_dv_tbl_resource_get(dev, key->level,
10635                                        key->is_egress, key->is_fdb,
10636                                        dev_flow->external, tunnel,
10637                                        group_id, 0, key->id, error);
10638         if (!tbl)
10639                 return -rte_errno;      /* No need to refill the error info */
10640         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10641         ref->tbl = tbl;
10642         entry = mlx5_list_register(tbl_data->matchers, &ctx);
10643         if (!entry) {
10644                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10645                 return rte_flow_error_set(error, ENOMEM,
10646                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10647                                           "cannot allocate ref memory");
10648         }
10649         resource = container_of(entry, typeof(*resource), entry);
10650         dev_flow->handle->dvh.matcher = resource;
10651         return 0;
10652 }
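
/*
 * Hedged sketch (assumed values, not driver code): a matcher reference
 * carries the CRC of the match mask, the priority and the mask itself;
 * flow_dv_matcher_match_cb() treats two matchers as identical only when
 * all three compare equal, so the registration below either reuses an
 * existing matcher or triggers flow_dv_matcher_create_cb().
 */
#if 0
static int
example_matcher_register(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_flow_dv_matcher ref = {
		.priority = 0,
		.mask = { .size = sizeof(ref.mask.buf) },
	};
	union mlx5_flow_tbl_key key = { .level = 1 };

	/* CRC over the (here still all-zero) match mask. */
	ref.crc = rte_raw_cksum(ref.mask.buf, ref.mask.size);
	return flow_dv_matcher_register(dev, &ref, &key, dev_flow,
					NULL, 1, error);
}
#endif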
10653
10654 struct mlx5_list_entry *
10655 flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
10656 {
10657         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10658         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10659         struct mlx5_flow_dv_tag_resource *entry;
10660         uint32_t idx = 0;
10661         int ret;
10662
10663         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10664         if (!entry) {
10665                 rte_flow_error_set(ctx->error, ENOMEM,
10666                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10667                                    "cannot allocate resource memory");
10668                 return NULL;
10669         }
10670         entry->idx = idx;
10671         entry->tag_id = *(uint32_t *)(ctx->data);
10672         ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
10673                                                   &entry->action);
10674         if (ret) {
10675                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10676                 rte_flow_error_set(ctx->error, ENOMEM,
10677                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10678                                    NULL, "cannot create action");
10679                 return NULL;
10680         }
10681         return &entry->entry;
10682 }
10683
10684 int
10685 flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10686                      void *cb_ctx)
10687 {
10688         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10689         struct mlx5_flow_dv_tag_resource *tag =
10690                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10691
10692         return *(uint32_t *)(ctx->data) != tag->tag_id;
10693 }
10694
10695 struct mlx5_list_entry *
10696 flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10697                      void *cb_ctx)
10698 {
10699         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10700         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10701         struct mlx5_flow_dv_tag_resource *entry;
10702         uint32_t idx = 0;
10703
10704         entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10705         if (!entry) {
10706                 rte_flow_error_set(ctx->error, ENOMEM,
10707                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10708                                    "cannot allocate tag resource memory");
10709                 return NULL;
10710         }
10711         memcpy(entry, oentry, sizeof(*entry));
10712         entry->idx = idx;
10713         return &entry->entry;
10714 }
10715
10716 void
10717 flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10718 {
10719         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10720         struct mlx5_flow_dv_tag_resource *tag =
10721                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10722
10723         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10724 }
10725
10726 /**
10727  * Find existing tag resource or create and register a new one.
10728  *
10729  * @param[in, out] dev
10730  *   Pointer to rte_eth_dev structure.
10731  * @param[in, out] tag_be24
10732  *   Tag value in big endian, then right-shifted by 8 bits.
10733  * @param[in, out] dev_flow
10734  *   Pointer to the dev_flow.
10735  * @param[out] error
10736  *   Pointer to the error structure.
10737  *
10738  * @return
10739  *   0 on success, a negative errno value otherwise and rte_errno is set.
10740  */
10741 static int
10742 flow_dv_tag_resource_register
10743                         (struct rte_eth_dev *dev,
10744                          uint32_t tag_be24,
10745                          struct mlx5_flow *dev_flow,
10746                          struct rte_flow_error *error)
10747 {
10748         struct mlx5_priv *priv = dev->data->dev_private;
10749         struct mlx5_flow_dv_tag_resource *resource;
10750         struct mlx5_list_entry *entry;
10751         struct mlx5_flow_cb_ctx ctx = {
10752                                         .error = error,
10753                                         .data = &tag_be24,
10754                                         };
10755         struct mlx5_hlist *tag_table;
10756
10757         tag_table = flow_dv_hlist_prepare(priv->sh, &priv->sh->tag_table,
10758                                       "tags",
10759                                       MLX5_TAGS_HLIST_ARRAY_SIZE,
10760                                       false, false, priv->sh,
10761                                       flow_dv_tag_create_cb,
10762                                       flow_dv_tag_match_cb,
10763                                       flow_dv_tag_remove_cb,
10764                                       flow_dv_tag_clone_cb,
10765                                       flow_dv_tag_clone_free_cb,
10766                                       error);
10767         if (unlikely(!tag_table))
10768                 return -rte_errno;
10769         entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
10770         if (entry) {
10771                 resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
10772                                         entry);
10773                 dev_flow->handle->dvh.rix_tag = resource->idx;
10774                 dev_flow->dv.tag_resource = resource;
10775                 return 0;
10776         }
10777         return -rte_errno;
10778 }
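
/*
 * Hedged sketch (assumption, not driver code): deriving the 24-bit
 * big-endian tag the documentation above refers to - the 32-bit mark
 * value is converted to big endian and right-shifted by 8 bits before
 * registration.
 */
#if 0
static int
example_register_mark_tag(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
			  struct rte_flow_error *error)
{
	uint32_t tag_be24 = rte_cpu_to_be_32(0x123456) >> 8;

	return flow_dv_tag_resource_register(dev, tag_be24, dev_flow, error);
}
#endif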
10779
10780 void
10781 flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10782 {
10783         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10784         struct mlx5_flow_dv_tag_resource *tag =
10785                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10786
10787         MLX5_ASSERT(tag && sh && tag->action);
10788         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10789         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10790         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10791 }
10792
10793 /**
10794  * Release the tag.
10795  *
10796  * @param dev
10797  *   Pointer to Ethernet device.
10798  * @param tag_idx
10799  *   Tag index.
10800  *
10801  * @return
10802  *   1 while a reference on it exists, 0 when freed.
10803  */
10804 static int
10805 flow_dv_tag_release(struct rte_eth_dev *dev,
10806                     uint32_t tag_idx)
10807 {
10808         struct mlx5_priv *priv = dev->data->dev_private;
10809         struct mlx5_flow_dv_tag_resource *tag;
10810
10811         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10812         if (!tag)
10813                 return 0;
10814         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10815                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10816         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10817 }
10818
10819 /**
10820  * Translate action PORT_ID / REPRESENTED_PORT to vport.
10821  *
10822  * @param[in] dev
10823  *   Pointer to rte_eth_dev structure.
10824  * @param[in] action
10825  *   Pointer to action PORT_ID / REPRESENTED_PORT.
10826  * @param[out] dst_port_id
10827  *   The target port ID.
10828  * @param[out] error
10829  *   Pointer to the error structure.
10830  *
10831  * @return
10832  *   0 on success, a negative errno value otherwise and rte_errno is set.
10833  */
10834 static int
10835 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10836                                  const struct rte_flow_action *action,
10837                                  uint32_t *dst_port_id,
10838                                  struct rte_flow_error *error)
10839 {
10840         uint32_t port;
10841         struct mlx5_priv *priv;
10842
10843         switch (action->type) {
10844         case RTE_FLOW_ACTION_TYPE_PORT_ID: {
10845                 const struct rte_flow_action_port_id *conf;
10846
10847                 conf = (const struct rte_flow_action_port_id *)action->conf;
10848                 port = conf->original ? dev->data->port_id : conf->id;
10849                 break;
10850         }
10851         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
10852                 const struct rte_flow_action_ethdev *ethdev;
10853
10854                 ethdev = (const struct rte_flow_action_ethdev *)action->conf;
10855                 port = ethdev->port_id;
10856                 break;
10857         }
10858         default:
10859                 MLX5_ASSERT(false);
10860                 return rte_flow_error_set(error, EINVAL,
10861                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
10862                                           "unknown E-Switch action");
10863         }
10864
10865         priv = mlx5_port_to_eswitch_info(port, false);
10866         if (!priv)
10867                 return rte_flow_error_set(error, -rte_errno,
10868                                           RTE_FLOW_ERROR_TYPE_ACTION,
10869                                           NULL,
10870                                           "No eswitch info was found for port");
10871 #ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
10872         /*
10873          * This parameter is transferred to
10874          * mlx5dv_dr_action_create_dest_ib_port().
10875          */
10876         *dst_port_id = priv->dev_port;
10877 #else
10878         /*
10879          * Legacy mode, no LAG configuration is supported.
10880          * This parameter is transferred to
10881          * mlx5dv_dr_action_create_dest_vport().
10882          */
10883         *dst_port_id = priv->vport_id;
10884 #endif
10885         return 0;
10886 }
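
/*
 * Hedged usage sketch (assumed port number): resolving a
 * RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT action to the vport/IB port
 * identifier consumed by the rdma-core destination action.
 */
#if 0
static int
example_resolve_dest_port(struct rte_eth_dev *dev,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action_ethdev conf = { .port_id = 1 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
		.conf = &conf,
	};
	uint32_t dst_port_id = 0;

	return flow_dv_translate_action_port_id(dev, &action,
						&dst_port_id, error);
}
#endif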
10887
10888 /**
10889  * Create a counter with aging configuration.
10890  *
10891  * @param[in] dev
10892  *   Pointer to rte_eth_dev structure.
10893  * @param[in] dev_flow
10894  *   Pointer to the mlx5_flow.
10895  * @param[in] count
10896  *   Pointer to the counter action configuration.
10897  * @param[in] age
10898  *   Pointer to the aging action configuration.
10899  *
10900  * @return
10901  *   Index to flow counter on success, 0 otherwise.
10902  */
10903 static uint32_t
10904 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10905                                 struct mlx5_flow *dev_flow,
10906                                 const struct rte_flow_action_count *count
10907                                         __rte_unused,
10908                                 const struct rte_flow_action_age *age)
10909 {
10910         uint32_t counter;
10911         struct mlx5_age_param *age_param;
10912
10913         counter = flow_dv_counter_alloc(dev, !!age);
10914         if (!counter || age == NULL)
10915                 return counter;
10916         age_param = flow_dv_counter_idx_get_age(dev, counter);
10917         age_param->context = age->context ? age->context :
10918                 (void *)(uintptr_t)(dev_flow->flow_idx);
10919         age_param->timeout = age->timeout;
10920         age_param->port_id = dev->data->port_id;
10921         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10922         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10923         return counter;
10924 }
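
/*
 * Hedged sketch (assumed timeout): an AGE action configuration as
 * consumed by the helper above; with a NULL context the helper falls
 * back to the flow index as the aging context.
 */
#if 0
static uint32_t
example_counter_with_age(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	const struct rte_flow_action_age age = {
		.timeout = 10, /* seconds */
		.context = NULL,
	};

	return flow_dv_translate_create_counter(dev, dev_flow, NULL, &age);
}
#endif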
10925
10926 /**
10927  * Add Tx queue matcher
10928  *
10929  * @param[in] dev
10930  *   Pointer to the dev struct.
10931  * @param[in, out] matcher
10932  *   Flow matcher.
10933  * @param[in, out] key
10934  *   Flow matcher value.
10935  * @param[in] item
10936  *   Flow pattern to translate.
10939  */
10940 static void
10941 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10942                                 void *matcher, void *key,
10943                                 const struct rte_flow_item *item)
10944 {
10945         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10946         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10947         void *misc_m =
10948                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10949         void *misc_v =
10950                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10951         struct mlx5_txq_ctrl *txq;
10952         uint32_t queue, mask;
10953
10954         queue_m = (const void *)item->mask;
10955         queue_v = (const void *)item->spec;
10956         if (!queue_v)
10957                 return;
10958         txq = mlx5_txq_get(dev, queue_v->queue);
10959         if (!txq)
10960                 return;
10961         if (txq->type == MLX5_TXQ_TYPE_HAIRPIN)
10962                 queue = txq->obj->sq->id;
10963         else
10964                 queue = txq->obj->sq_obj.sq->id;
10965         mask = queue_m == NULL ? UINT32_MAX : queue_m->queue;
10966         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, mask);
10967         MLX5_SET(fte_match_set_misc, misc_v, source_sqn, queue & mask);
10968         mlx5_txq_release(dev, queue_v->queue);
10969 }
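
/*
 * Hedged sketch (assumed queue index): the internal Tx-queue item this
 * translator consumes; a NULL mask matches the full 32-bit SQ number.
 */
#if 0
static const struct mlx5_rte_flow_item_tx_queue example_txq_spec = {
	.queue = 3,
};
static const struct rte_flow_item example_txq_item = {
	.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
	.spec = &example_txq_spec,
	.mask = NULL,
};
#endif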
10970
10971 /**
10972  * Set the hash fields according to the @p flow information.
10973  *
10974  * @param[in] dev_flow
10975  *   Pointer to the mlx5_flow.
10976  * @param[in] rss_desc
10977  *   Pointer to the mlx5_flow_rss_desc.
10978  */
10979 static void
10980 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10981                        struct mlx5_flow_rss_desc *rss_desc)
10982 {
10983         uint64_t items = dev_flow->handle->layers;
10984         int rss_inner = 0;
10985         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10986
10987         dev_flow->hash_fields = 0;
10988 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10989         if (rss_desc->level >= 2)
10990                 rss_inner = 1;
10991 #endif
10992         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10993             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10994                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10995                         if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
10996                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10997                         else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
10998                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10999                         else
11000                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
11001                 }
11002         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
11003                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
11004                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
11005                         if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
11006                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
11007                         else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
11008                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
11009                         else
11010                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
11011                 }
11012         }
11013         if (dev_flow->hash_fields == 0)
11014                 /*
11015                  * There is no match between the RSS types and the
11016                  * L3 protocol (IPv4/IPv6) defined in the flow rule.
11017                  */
11018                 return;
11019         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
11020             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
11021                 if (rss_types & RTE_ETH_RSS_UDP) {
11022                         if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
11023                                 dev_flow->hash_fields |=
11024                                                 IBV_RX_HASH_SRC_PORT_UDP;
11025                         else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
11026                                 dev_flow->hash_fields |=
11027                                                 IBV_RX_HASH_DST_PORT_UDP;
11028                         else
11029                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
11030                 }
11031         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
11032                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
11033                 if (rss_types & RTE_ETH_RSS_TCP) {
11034                         if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
11035                                 dev_flow->hash_fields |=
11036                                                 IBV_RX_HASH_SRC_PORT_TCP;
11037                         else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
11038                                 dev_flow->hash_fields |=
11039                                                 IBV_RX_HASH_DST_PORT_TCP;
11040                         else
11041                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
11042                 }
11043         }
11044         if (rss_inner)
11045                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
11046 }
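
/*
 * Worked example (assumed inputs, not driver code): with outer
 * IPv4/UDP layers and RSS types RTE_ETH_RSS_IPV4 |
 * RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_UDP, the logic above narrows
 * L3 hashing to the source address only while keeping both UDP ports,
 * because no L4_*_ONLY flag is set.
 */
#if 0
static void
example_hashfields_trace(struct mlx5_flow *dev_flow,
			 struct mlx5_flow_rss_desc *rss_desc)
{
	rss_desc->level = 0;
	rss_desc->types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_L3_SRC_ONLY |
			  RTE_ETH_RSS_UDP;
	dev_flow->handle->layers = MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
				   MLX5_FLOW_LAYER_OUTER_L4_UDP;
	flow_dv_hashfields_set(dev_flow, rss_desc);
	MLX5_ASSERT(dev_flow->hash_fields ==
		    (IBV_RX_HASH_SRC_IPV4 | MLX5_UDP_IBV_RX_HASH));
}
#endif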
11047
11048 /**
11049  * Prepare an Rx Hash queue.
11050  *
11051  * @param dev
11052  *   Pointer to Ethernet device.
11053  * @param[in] dev_flow
11054  *   Pointer to the mlx5_flow.
11055  * @param[in] rss_desc
11056  *   Pointer to the mlx5_flow_rss_desc.
11057  * @param[out] hrxq_idx
11058  *   Hash Rx queue index.
11059  *
11060  * @return
11061  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
11062  */
11063 static struct mlx5_hrxq *
11064 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
11065                      struct mlx5_flow *dev_flow,
11066                      struct mlx5_flow_rss_desc *rss_desc,
11067                      uint32_t *hrxq_idx)
11068 {
11069         struct mlx5_priv *priv = dev->data->dev_private;
11070         struct mlx5_flow_handle *dh = dev_flow->handle;
11071         struct mlx5_hrxq *hrxq;
11072
11073         MLX5_ASSERT(rss_desc->queue_num);
11074         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
11075         rss_desc->hash_fields = dev_flow->hash_fields;
11076         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
11077         rss_desc->shared_rss = 0;
11078         if (rss_desc->hash_fields == 0)
11079                 rss_desc->queue_num = 1;
11080         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
11081         if (!*hrxq_idx)
11082                 return NULL;
11083         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
11084                               *hrxq_idx);
11085         return hrxq;
11086 }
11087
11088 /**
11089  * Release sample sub action resource.
11090  *
11091  * @param[in, out] dev
11092  *   Pointer to rte_eth_dev structure.
11093  * @param[in] act_res
11094  *   Pointer to sample sub action resource.
11095  */
11096 static void
11097 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
11098                                    struct mlx5_flow_sub_actions_idx *act_res)
11099 {
11100         if (act_res->rix_hrxq) {
11101                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
11102                 act_res->rix_hrxq = 0;
11103         }
11104         if (act_res->rix_encap_decap) {
11105                 flow_dv_encap_decap_resource_release(dev,
11106                                                      act_res->rix_encap_decap);
11107                 act_res->rix_encap_decap = 0;
11108         }
11109         if (act_res->rix_port_id_action) {
11110                 flow_dv_port_id_action_resource_release(dev,
11111                                                 act_res->rix_port_id_action);
11112                 act_res->rix_port_id_action = 0;
11113         }
11114         if (act_res->rix_tag) {
11115                 flow_dv_tag_release(dev, act_res->rix_tag);
11116                 act_res->rix_tag = 0;
11117         }
11118         if (act_res->rix_jump) {
11119                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
11120                 act_res->rix_jump = 0;
11121         }
11122 }
11123
11124 int
11125 flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
11126                         struct mlx5_list_entry *entry, void *cb_ctx)
11127 {
11128         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11129         struct rte_eth_dev *dev = ctx->dev;
11130         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11131         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
11132                                                               typeof(*resource),
11133                                                               entry);
11134
11135         if (ctx_resource->ratio == resource->ratio &&
11136             ctx_resource->ft_type == resource->ft_type &&
11137             ctx_resource->ft_id == resource->ft_id &&
11138             ctx_resource->set_action == resource->set_action &&
11139             !memcmp((void *)&ctx_resource->sample_act,
11140                     (void *)&resource->sample_act,
11141                     sizeof(struct mlx5_flow_sub_actions_list))) {
11142                 /*
11143                  * An existing sample action is reused, so release the
11144                  * references prepared for the sub-actions.
11145                  */
11146                 flow_dv_sample_sub_actions_release(dev,
11147                                                    &ctx_resource->sample_idx);
11148                 return 0;
11149         }
11150         return 1;
11151 }
11152
11153 struct mlx5_list_entry *
11154 flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11155 {
11156         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11157         struct rte_eth_dev *dev = ctx->dev;
11158         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11159         void **sample_dv_actions = ctx_resource->sub_actions;
11160         struct mlx5_flow_dv_sample_resource *resource;
11161         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
11162         struct mlx5_priv *priv = dev->data->dev_private;
11163         struct mlx5_dev_ctx_shared *sh = priv->sh;
11164         struct mlx5_flow_tbl_resource *tbl;
11165         uint32_t idx = 0;
11166         const uint32_t next_ft_step = 1;
11167         uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
11168         uint8_t is_egress = 0;
11169         uint8_t is_transfer = 0;
11170         struct rte_flow_error *error = ctx->error;
11171
11172         /* Register new sample resource. */
11173         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11174         if (!resource) {
11175                 rte_flow_error_set(error, ENOMEM,
11176                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11177                                           NULL,
11178                                           "cannot allocate resource memory");
11179                 return NULL;
11180         }
11181         *resource = *ctx_resource;
11182         /* Create normal path table level */
11183         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11184                 is_transfer = 1;
11185         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
11186                 is_egress = 1;
11187         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
11188                                         is_egress, is_transfer,
11189                                         true, NULL, 0, 0, 0, error);
11190         if (!tbl) {
11191                 rte_flow_error_set(error, ENOMEM,
11192                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11193                                           NULL,
11194                                           "fail to create normal path table "
11195                                           "for sample");
11196                 goto error;
11197         }
11198         resource->normal_path_tbl = tbl;
11199         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
11200                 if (!sh->default_miss_action) {
11201                         rte_flow_error_set(error, ENOMEM,
11202                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11203                                                 NULL,
11204                                                 "default miss action was not "
11205                                                 "created");
11206                         goto error;
11207                 }
11208                 sample_dv_actions[ctx_resource->sample_act.actions_num++] =
11209                                                 sh->default_miss_action;
11210         }
11211         /* Create a DR sample action */
11212         sampler_attr.sample_ratio = resource->ratio;
11213         sampler_attr.default_next_table = tbl->obj;
11214         sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
11215         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
11216                                                         &sample_dv_actions[0];
11217         sampler_attr.action = resource->set_action;
11218         if (mlx5_os_flow_dr_create_flow_action_sampler
11219                         (&sampler_attr, &resource->verbs_action)) {
11220                 rte_flow_error_set(error, ENOMEM,
11221                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11222                                         NULL, "cannot create sample action");
11223                 goto error;
11224         }
11225         resource->idx = idx;
11226         resource->dev = dev;
11227         return &resource->entry;
11228 error:
11229         if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
11230                 flow_dv_sample_sub_actions_release(dev,
11231                                                    &resource->sample_idx);
11232         if (resource->normal_path_tbl)
11233                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11234                                 resource->normal_path_tbl);
11235         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
11236         return NULL;
11237 }
11239
11240 struct mlx5_list_entry *
11241 flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
11242                          struct mlx5_list_entry *entry __rte_unused,
11243                          void *cb_ctx)
11244 {
11245         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11246         struct rte_eth_dev *dev = ctx->dev;
11247         struct mlx5_flow_dv_sample_resource *resource;
11248         struct mlx5_priv *priv = dev->data->dev_private;
11249         struct mlx5_dev_ctx_shared *sh = priv->sh;
11250         uint32_t idx = 0;
11251
11252         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11253         if (!resource) {
11254                 rte_flow_error_set(ctx->error, ENOMEM,
11255                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11256                                           NULL,
11257                                           "cannot allocate resource memory");
11258                 return NULL;
11259         }
11260         memcpy(resource, entry, sizeof(*resource));
11261         resource->idx = idx;
11262         resource->dev = dev;
11263         return &resource->entry;
11264 }
11265
11266 void
11267 flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
11268                              struct mlx5_list_entry *entry)
11269 {
11270         struct mlx5_flow_dv_sample_resource *resource =
11271                                   container_of(entry, typeof(*resource), entry);
11272         struct rte_eth_dev *dev = resource->dev;
11273         struct mlx5_priv *priv = dev->data->dev_private;
11274
11275         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
11276 }
11277
11278 /**
11279  * Find existing sample resource or create and register a new one.
11280  *
11281  * @param[in, out] dev
11282  *   Pointer to rte_eth_dev structure.
11283  * @param[in] ref
11284  *   Pointer to sample resource reference.
11285  * @param[in, out] dev_flow
11286  *   Pointer to the dev_flow.
11287  * @param[out] error
11288  *   Pointer to the error structure.
11289  *
11290  * @return
11291  *   0 on success, a negative errno value otherwise and rte_errno is set.
11292  */
11293 static int
11294 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
11295                          struct mlx5_flow_dv_sample_resource *ref,
11296                          struct mlx5_flow *dev_flow,
11297                          struct rte_flow_error *error)
11298 {
11299         struct mlx5_flow_dv_sample_resource *resource;
11300         struct mlx5_list_entry *entry;
11301         struct mlx5_priv *priv = dev->data->dev_private;
11302         struct mlx5_flow_cb_ctx ctx = {
11303                 .dev = dev,
11304                 .error = error,
11305                 .data = ref,
11306         };
11307
11308         entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
11309         if (!entry)
11310                 return -rte_errno;
11311         resource = container_of(entry, typeof(*resource), entry);
11312         dev_flow->handle->dvh.rix_sample = resource->idx;
11313         dev_flow->dv.sample_res = resource;
11314         return 0;
11315 }
11316
11317 int
11318 flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
11319                             struct mlx5_list_entry *entry, void *cb_ctx)
11320 {
11321         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11322         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11323         struct rte_eth_dev *dev = ctx->dev;
11324         struct mlx5_flow_dv_dest_array_resource *resource =
11325                                   container_of(entry, typeof(*resource), entry);
11326         uint32_t idx = 0;
11327
11328         if (ctx_resource->num_of_dest == resource->num_of_dest &&
11329             ctx_resource->ft_type == resource->ft_type &&
11330             !memcmp((void *)resource->sample_act,
11331                     (void *)ctx_resource->sample_act,
11332                    (ctx_resource->num_of_dest *
11333                    sizeof(struct mlx5_flow_sub_actions_list)))) {
11334                 /*
11335                  * An existing destination array is reused, so release the
11336                  * references prepared for the sub-actions.
11337                  */
11338                 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11339                         flow_dv_sample_sub_actions_release(dev,
11340                                         &ctx_resource->sample_idx[idx]);
11341                 return 0;
11342         }
11343         return 1;
11344 }
11345
11346 struct mlx5_list_entry *
11347 flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11348 {
11349         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11350         struct rte_eth_dev *dev = ctx->dev;
11351         struct mlx5_flow_dv_dest_array_resource *resource;
11352         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11353         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
11354         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
11355         struct mlx5_priv *priv = dev->data->dev_private;
11356         struct mlx5_dev_ctx_shared *sh = priv->sh;
11357         struct mlx5_flow_sub_actions_list *sample_act;
11358         struct mlx5dv_dr_domain *domain;
11359         uint32_t idx = 0, res_idx = 0;
11360         struct rte_flow_error *error = ctx->error;
11361         uint64_t action_flags;
11362         int ret;
11363
11364         /* Register new destination array resource. */
11365         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11366                                             &res_idx);
11367         if (!resource) {
11368                 rte_flow_error_set(error, ENOMEM,
11369                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11370                                           NULL,
11371                                           "cannot allocate resource memory");
11372                 return NULL;
11373         }
11374         *resource = *ctx_resource;
11375         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11376                 domain = sh->fdb_domain;
11377         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
11378                 domain = sh->rx_domain;
11379         else
11380                 domain = sh->tx_domain;
11381         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11382                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
11383                                  mlx5_malloc(MLX5_MEM_ZERO,
11384                                  sizeof(struct mlx5dv_dr_action_dest_attr),
11385                                  0, SOCKET_ID_ANY);
11386                 if (!dest_attr[idx]) {
11387                         rte_flow_error_set(error, ENOMEM,
11388                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11389                                            NULL,
11390                                            "cannot allocate resource memory");
11391                         goto error;
11392                 }
11393                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
11394                 sample_act = &ctx_resource->sample_act[idx];
11395                 action_flags = sample_act->action_flags;
11396                 switch (action_flags) {
11397                 case MLX5_FLOW_ACTION_QUEUE:
11398                         dest_attr[idx]->dest = sample_act->dr_queue_action;
11399                         break;
11400                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
11401                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
11402                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
11403                         dest_attr[idx]->dest_reformat->reformat =
11404                                         sample_act->dr_encap_action;
11405                         dest_attr[idx]->dest_reformat->dest =
11406                                         sample_act->dr_port_id_action;
11407                         break;
11408                 case MLX5_FLOW_ACTION_PORT_ID:
11409                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
11410                         break;
11411                 case MLX5_FLOW_ACTION_JUMP:
11412                         dest_attr[idx]->dest = sample_act->dr_jump_action;
11413                         break;
11414                 default:
11415                         rte_flow_error_set(error, EINVAL,
11416                                            RTE_FLOW_ERROR_TYPE_ACTION,
11417                                            NULL,
11418                                            "unsupported actions type");
11419                         goto error;
11420                 }
11421         }
11422         /* create a dest array action */
11423         ret = mlx5_os_flow_dr_create_flow_action_dest_array
11424                                                 (domain,
11425                                                  resource->num_of_dest,
11426                                                  dest_attr,
11427                                                  &resource->action);
11428         if (ret) {
11429                 rte_flow_error_set(error, ENOMEM,
11430                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11431                                    NULL,
11432                                    "cannot create destination array action");
11433                 goto error;
11434         }
11435         resource->idx = res_idx;
11436         resource->dev = dev;
11437         for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11438                 mlx5_free(dest_attr[idx]);
11439         return &resource->entry;
11440 error:
11441         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11442                 flow_dv_sample_sub_actions_release(dev,
11443                                                    &resource->sample_idx[idx]);
11444                 if (dest_attr[idx])
11445                         mlx5_free(dest_attr[idx]);
11446         }
11447         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
11448         return NULL;
11449 }
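
/*
 * Hedged sketch (assumed handles, not driver code): the two destination
 * flavours filled in above - a plain destination and a reformat
 * destination that applies an encap before forwarding to a vport.
 */
#if 0
static void
example_fill_dest_attr(struct mlx5dv_dr_action_dest_attr *plain,
		       struct mlx5dv_dr_action_dest_attr *reformat,
		       struct mlx5dv_dr_action_dest_reformat *rfmt,
		       struct mlx5_flow_sub_actions_list *act)
{
	plain->type = MLX5DV_DR_ACTION_DEST;
	plain->dest = act->dr_queue_action;
	reformat->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
	reformat->dest_reformat = rfmt;
	rfmt->reformat = act->dr_encap_action;
	rfmt->dest = act->dr_port_id_action;
}
#endif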
11450
11451 struct mlx5_list_entry *
11452 flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
11453                             struct mlx5_list_entry *entry __rte_unused,
11454                             void *cb_ctx)
11455 {
11456         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11457         struct rte_eth_dev *dev = ctx->dev;
11458         struct mlx5_flow_dv_dest_array_resource *resource;
11459         struct mlx5_priv *priv = dev->data->dev_private;
11460         struct mlx5_dev_ctx_shared *sh = priv->sh;
11461         uint32_t res_idx = 0;
11462         struct rte_flow_error *error = ctx->error;
11463
11464         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11465                                       &res_idx);
11466         if (!resource) {
11467                 rte_flow_error_set(error, ENOMEM,
11468                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11469                                           NULL,
11470                                           "cannot allocate dest-array memory");
11471                 return NULL;
11472         }
11473         memcpy(resource, entry, sizeof(*resource));
11474         resource->idx = res_idx;
11475         resource->dev = dev;
11476         return &resource->entry;
11477 }
11478
11479 void
11480 flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
11481                                  struct mlx5_list_entry *entry)
11482 {
11483         struct mlx5_flow_dv_dest_array_resource *resource =
11484                         container_of(entry, typeof(*resource), entry);
11485         struct rte_eth_dev *dev = resource->dev;
11486         struct mlx5_priv *priv = dev->data->dev_private;
11487
11488         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11489 }
11490
11491 /**
11492  * Find existing destination array resource or create and register a new one.
11493  *
11494  * @param[in, out] dev
11495  *   Pointer to rte_eth_dev structure.
11496  * @param[in] ref
11497  *   Pointer to destination array resource reference.
11498  * @param[in, out] dev_flow
11499  *   Pointer to the dev_flow.
11500  * @param[out] error
11501  *   Pointer to the error structure.
11502  *
11503  * @return
11504  *   0 on success, a negative errno value otherwise and rte_errno is set.
11505  */
11506 static int
11507 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11508                          struct mlx5_flow_dv_dest_array_resource *ref,
11509                          struct mlx5_flow *dev_flow,
11510                          struct rte_flow_error *error)
11511 {
11512         struct mlx5_flow_dv_dest_array_resource *resource;
11513         struct mlx5_priv *priv = dev->data->dev_private;
11514         struct mlx5_list_entry *entry;
11515         struct mlx5_flow_cb_ctx ctx = {
11516                 .dev = dev,
11517                 .error = error,
11518                 .data = ref,
11519         };
11520
11521         entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
11522         if (!entry)
11523                 return -rte_errno;
11524         resource = container_of(entry, typeof(*resource), entry);
11525         dev_flow->handle->dvh.rix_dest_array = resource->idx;
11526         dev_flow->dv.dest_array_res = resource;
11527         return 0;
11528 }
11529
11530 /**
11531  * Convert Sample action to DV specification.
11532  *
11533  * @param[in] dev
11534  *   Pointer to rte_eth_dev structure.
11535  * @param[in] action
11536  *   Pointer to sample action structure.
11537  * @param[in, out] dev_flow
11538  *   Pointer to the mlx5_flow.
11539  * @param[in] attr
11540  *   Pointer to the flow attributes.
11541  * @param[in, out] num_of_dest
11542  *   Pointer to the num of destination.
11543  * @param[in, out] sample_actions
11544  *   Pointer to sample actions list.
11545  * @param[in, out] res
11546  *   Pointer to sample resource.
11547  * @param[out] error
11548  *   Pointer to the error structure.
11549  *
11550  * @return
11551  *   0 on success, a negative errno value otherwise and rte_errno is set.
11552  */
11553 static int
11554 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
11555                                 const struct rte_flow_action_sample *action,
11556                                 struct mlx5_flow *dev_flow,
11557                                 const struct rte_flow_attr *attr,
11558                                 uint32_t *num_of_dest,
11559                                 void **sample_actions,
11560                                 struct mlx5_flow_dv_sample_resource *res,
11561                                 struct rte_flow_error *error)
11562 {
11563         struct mlx5_priv *priv = dev->data->dev_private;
11564         const struct rte_flow_action *sub_actions;
11565         struct mlx5_flow_sub_actions_list *sample_act;
11566         struct mlx5_flow_sub_actions_idx *sample_idx;
11567         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11568         struct rte_flow *flow = dev_flow->flow;
11569         struct mlx5_flow_rss_desc *rss_desc;
11570         uint64_t action_flags = 0;
11571
11572         MLX5_ASSERT(wks);
11573         rss_desc = &wks->rss_desc;
11574         sample_act = &res->sample_act;
11575         sample_idx = &res->sample_idx;
11576         res->ratio = action->ratio;
11577         sub_actions = action->actions;
11578         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
11579                 int type = sub_actions->type;
11580                 uint32_t pre_rix = 0;
11581                 void *pre_r;
11582                 switch (type) {
11583                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11584                 {
11585                         const struct rte_flow_action_queue *queue;
11586                         struct mlx5_hrxq *hrxq;
11587                         uint32_t hrxq_idx;
11588
11589                         queue = sub_actions->conf;
11590                         rss_desc->queue_num = 1;
11591                         rss_desc->queue[0] = queue->index;
11592                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11593                                                     rss_desc, &hrxq_idx);
11594                         if (!hrxq)
11595                                 return rte_flow_error_set
11596                                         (error, rte_errno,
11597                                          RTE_FLOW_ERROR_TYPE_ACTION,
11598                                          NULL,
11599                                          "cannot create fate queue");
11600                         sample_act->dr_queue_action = hrxq->action;
11601                         sample_idx->rix_hrxq = hrxq_idx;
11602                         sample_actions[sample_act->actions_num++] =
11603                                                 hrxq->action;
11604                         (*num_of_dest)++;
11605                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11606                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11607                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11608                         dev_flow->handle->fate_action =
11609                                         MLX5_FLOW_FATE_QUEUE;
11610                         break;
11611                 }
11612                 case RTE_FLOW_ACTION_TYPE_RSS:
11613                 {
11614                         struct mlx5_hrxq *hrxq;
11615                         uint32_t hrxq_idx;
11616                         const struct rte_flow_action_rss *rss;
11617                         const uint8_t *rss_key;
11618
11619                         rss = sub_actions->conf;
11620                         memcpy(rss_desc->queue, rss->queue,
11621                                rss->queue_num * sizeof(uint16_t));
11622                         rss_desc->queue_num = rss->queue_num;
11623                         /* NULL RSS key indicates default RSS key. */
11624                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11625                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11626                         /*
11627                          * rss->level and rss->types should be set in advance
11628                          * when expanding items for RSS.
11629                          */
11630                         flow_dv_hashfields_set(dev_flow, rss_desc);
11631                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11632                                                     rss_desc, &hrxq_idx);
11633                         if (!hrxq)
11634                                 return rte_flow_error_set
11635                                         (error, rte_errno,
11636                                          RTE_FLOW_ERROR_TYPE_ACTION,
11637                                          NULL,
11638                                          "cannot create fate queue");
11639                         sample_act->dr_queue_action = hrxq->action;
11640                         sample_idx->rix_hrxq = hrxq_idx;
11641                         sample_actions[sample_act->actions_num++] =
11642                                                 hrxq->action;
11643                         (*num_of_dest)++;
11644                         action_flags |= MLX5_FLOW_ACTION_RSS;
11645                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11646                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11647                         dev_flow->handle->fate_action =
11648                                         MLX5_FLOW_FATE_QUEUE;
11649                         break;
11650                 }
11651                 case RTE_FLOW_ACTION_TYPE_MARK:
11652                 {
11653                         uint32_t tag_be = mlx5_flow_mark_set
11654                                 (((const struct rte_flow_action_mark *)
11655                                 (sub_actions->conf))->id);
11656
11657                         wks->mark = 1;
11658                         pre_rix = dev_flow->handle->dvh.rix_tag;
11659                         /* Save the mark resource before sample */
11660                         pre_r = dev_flow->dv.tag_resource;
11661                         if (flow_dv_tag_resource_register(dev, tag_be,
11662                                                   dev_flow, error))
11663                                 return -rte_errno;
11664                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11665                         sample_act->dr_tag_action =
11666                                 dev_flow->dv.tag_resource->action;
11667                         sample_idx->rix_tag =
11668                                 dev_flow->handle->dvh.rix_tag;
11669                         sample_actions[sample_act->actions_num++] =
11670                                                 sample_act->dr_tag_action;
11671                         /* Recover the mark resource after sample */
11672                         dev_flow->dv.tag_resource = pre_r;
11673                         dev_flow->handle->dvh.rix_tag = pre_rix;
11674                         action_flags |= MLX5_FLOW_ACTION_MARK;
11675                         break;
11676                 }
11677                 case RTE_FLOW_ACTION_TYPE_COUNT:
11678                 {
11679                         if (!flow->counter) {
11680                                 flow->counter =
11681                                         flow_dv_translate_create_counter(dev,
11682                                                 dev_flow, sub_actions->conf,
11683                                                 0);
11684                                 if (!flow->counter)
11685                                         return rte_flow_error_set
11686                                                 (error, rte_errno,
11687                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11688                                                 NULL,
11689                                                 "cannot create counter"
11690                                                 " object.");
11691                         }
11692                         sample_act->dr_cnt_action =
11693                                   (flow_dv_counter_get_by_idx(dev,
11694                                   flow->counter, NULL))->action;
11695                         sample_actions[sample_act->actions_num++] =
11696                                                 sample_act->dr_cnt_action;
11697                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11698                         break;
11699                 }
11700                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11701                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
11702                 {
11703                         struct mlx5_flow_dv_port_id_action_resource
11704                                         port_id_resource;
11705                         uint32_t port_id = 0;
11706
11707                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11708                         /* Save the port id resource before sample */
11709                         pre_rix = dev_flow->handle->rix_port_id_action;
11710                         pre_r = dev_flow->dv.port_id_action;
11711                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11712                                                              &port_id, error))
11713                                 return -rte_errno;
11714                         port_id_resource.port_id = port_id;
11715                         if (flow_dv_port_id_action_resource_register
11716                             (dev, &port_id_resource, dev_flow, error))
11717                                 return -rte_errno;
11718                         sample_act->dr_port_id_action =
11719                                 dev_flow->dv.port_id_action->action;
11720                         sample_idx->rix_port_id_action =
11721                                 dev_flow->handle->rix_port_id_action;
11722                         sample_actions[sample_act->actions_num++] =
11723                                                 sample_act->dr_port_id_action;
11724                         /* Recover the port id resource after sample */
11725                         dev_flow->dv.port_id_action = pre_r;
11726                         dev_flow->handle->rix_port_id_action = pre_rix;
11727                         (*num_of_dest)++;
11728                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11729                         break;
11730                 }
11731                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11732                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11733                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11734                         /* Save the encap resource before sample */
11735                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11736                         pre_r = dev_flow->dv.encap_decap;
11737                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11738                                                            dev_flow,
11739                                                            attr->transfer,
11740                                                            error))
11741                                 return -rte_errno;
11742                         sample_act->dr_encap_action =
11743                                 dev_flow->dv.encap_decap->action;
11744                         sample_idx->rix_encap_decap =
11745                                 dev_flow->handle->dvh.rix_encap_decap;
11746                         sample_actions[sample_act->actions_num++] =
11747                                                 sample_act->dr_encap_action;
11748                         /* Recover the encap resource after sample */
11749                         dev_flow->dv.encap_decap = pre_r;
11750                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11751                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11752                         break;
11753                 default:
11754                         return rte_flow_error_set(error, EINVAL,
11755                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11756                                 NULL,
11757                                 "action not supported in sample");
11758                 }
11759         }
11760         sample_act->action_flags = action_flags;
11761         res->ft_id = dev_flow->dv.group;
11762         if (attr->transfer) {
11763                 union {
11764                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11765                         uint64_t set_action;
11766                 } action_ctx = { .set_action = 0 };
11767
11768                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11769                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11770                          MLX5_MODIFICATION_TYPE_SET);
11771                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11772                          MLX5_MODI_META_REG_C_0);
11773                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11774                          priv->vport_meta_tag);
11775                 res->set_action = action_ctx.set_action;
11776         } else if (attr->ingress) {
11777                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11778         } else {
11779                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11780         }
11781         return 0;
11782 }
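
/*
 * Illustrative sketch (not part of the driver): sample sub-actions are
 * translated with the same helpers as the normal path, so the resource
 * slots in dev_flow are saved and restored around each helper call.
 * With the port-id case above as the model:
 *
 *   pre_rix = dev_flow->handle->rix_port_id_action;   // save
 *   pre_r = dev_flow->dv.port_id_action;
 *   ...translate/register the sub-action...           // fills the slots
 *   sample_idx->rix_port_id_action =
 *           dev_flow->handle->rix_port_id_action;     // keep for sample
 *   dev_flow->dv.port_id_action = pre_r;              // restore
 *   dev_flow->handle->rix_port_id_action = pre_rix;
 */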
11783
11784 /**
11785  * Convert Sample action to DV specification.
11786  *
11787  * @param[in] dev
11788  *   Pointer to rte_eth_dev structure.
11789  * @param[in, out] dev_flow
11790  *   Pointer to the mlx5_flow.
11791  * @param[in] num_of_dest
11792  *   The number of destinations.
11793  * @param[in, out] res
11794  *   Pointer to sample resource.
11795  * @param[in, out] mdest_res
11796  *   Pointer to destination array resource.
11797  * @param[in] sample_actions
11798  *   Pointer to sample path actions list.
11799  * @param[in] action_flags
11800  *   Holds the actions detected until now.
11801  * @param[out] error
11802  *   Pointer to the error structure.
11803  *
11804  * @return
11805  *   0 on success, a negative errno value otherwise and rte_errno is set.
11806  */
11807 static int
11808 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11809                              struct mlx5_flow *dev_flow,
11810                              uint32_t num_of_dest,
11811                              struct mlx5_flow_dv_sample_resource *res,
11812                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
11813                              void **sample_actions,
11814                              uint64_t action_flags,
11815                              struct rte_flow_error *error)
11816 {
11817         /* Update normal path action resource at the last index of the array. */
11818         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11819         struct mlx5_flow_sub_actions_list *sample_act =
11820                                         &mdest_res->sample_act[dest_index];
11821         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11822         struct mlx5_flow_rss_desc *rss_desc;
11823         uint32_t normal_idx = 0;
11824         struct mlx5_hrxq *hrxq;
11825         uint32_t hrxq_idx;
11826
11827         MLX5_ASSERT(wks);
11828         rss_desc = &wks->rss_desc;
11829         if (num_of_dest > 1) {
11830                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
11831                         /* Handle QP action for mirroring */
11832                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11833                                                     rss_desc, &hrxq_idx);
11834                         if (!hrxq)
11835                                 return rte_flow_error_set
11836                                      (error, rte_errno,
11837                                       RTE_FLOW_ERROR_TYPE_ACTION,
11838                                       NULL,
11839                                       "cannot create rx queue");
11840                         normal_idx++;
11841                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
11842                         sample_act->dr_queue_action = hrxq->action;
11843                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11844                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11845                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11846                 }
11847                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
11848                         normal_idx++;
11849                         mdest_res->sample_idx[dest_index].rix_encap_decap =
11850                                 dev_flow->handle->dvh.rix_encap_decap;
11851                         sample_act->dr_encap_action =
11852                                 dev_flow->dv.encap_decap->action;
11853                         dev_flow->handle->dvh.rix_encap_decap = 0;
11854                 }
11855                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
11856                         normal_idx++;
11857                         mdest_res->sample_idx[dest_index].rix_port_id_action =
11858                                 dev_flow->handle->rix_port_id_action;
11859                         sample_act->dr_port_id_action =
11860                                 dev_flow->dv.port_id_action->action;
11861                         dev_flow->handle->rix_port_id_action = 0;
11862                 }
11863                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
11864                         normal_idx++;
11865                         mdest_res->sample_idx[dest_index].rix_jump =
11866                                 dev_flow->handle->rix_jump;
11867                         sample_act->dr_jump_action =
11868                                 dev_flow->dv.jump->action;
11869                         dev_flow->handle->rix_jump = 0;
11870                 }
11871                 sample_act->actions_num = normal_idx;
11872                 /* Update sample action resource at the first index of the array. */
11873                 mdest_res->ft_type = res->ft_type;
11874                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
11875                                 sizeof(struct mlx5_flow_sub_actions_idx));
11876                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
11877                                 sizeof(struct mlx5_flow_sub_actions_list));
11878                 mdest_res->num_of_dest = num_of_dest;
11879                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
11880                                                          dev_flow, error))
11881                         return rte_flow_error_set(error, EINVAL,
11882                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11883                                                   NULL, "can't create sample "
11884                                                   "action");
11885         } else {
11886                 res->sub_actions = sample_actions;
11887                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
11888                         return rte_flow_error_set(error, EINVAL,
11889                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11890                                                   NULL,
11891                                                   "can't create sample action");
11892         }
11893         return 0;
11894 }
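
/*
 * Usage sketch (hypothetical caller, not part of the driver):
 *
 *   struct mlx5_flow_dv_sample_resource res = { 0 };
 *   struct mlx5_flow_dv_dest_array_resource mdest = { 0 };
 *   void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = { 0 };
 *
 *   ...fill res/mdest/sample_actions during translation...
 *   if (flow_dv_create_action_sample(dev, dev_flow, num_of_dest, &res,
 *                                    &mdest, sample_actions,
 *                                    action_flags, error))
 *       return -rte_errno;   // rte_errno set by the callee
 */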
11895
11896 /**
11897  * Remove an ASO age action from the age actions list.
11898  *
11899  * @param[in] dev
11900  *   Pointer to the Ethernet device structure.
11901  * @param[in] age
11902  *   Pointer to the ASO age action handler.
11903  */
11904 static void
11905 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11906                                 struct mlx5_aso_age_action *age)
11907 {
11908         struct mlx5_age_info *age_info;
11909         struct mlx5_age_param *age_param = &age->age_params;
11910         struct mlx5_priv *priv = dev->data->dev_private;
11911         uint16_t expected = AGE_CANDIDATE;
11912
11913         age_info = GET_PORT_AGE_INFO(priv);
11914         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11915                                          AGE_FREE, false, __ATOMIC_RELAXED,
11916                                          __ATOMIC_RELAXED)) {
11917                 /*
11918                  * We need the lock even on age timeout, since the
11919                  * age action may still be in process.
11920                  */
11921                 rte_spinlock_lock(&age_info->aged_sl);
11922                 LIST_REMOVE(age, next);
11923                 rte_spinlock_unlock(&age_info->aged_sl);
11924                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11925         }
11926 }
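
/*
 * Illustrative sketch (not part of the driver): the compare-exchange
 * above resolves two cases without taking the lock up front:
 *
 *   uint16_t expected = AGE_CANDIDATE;
 *   if (CAS(&age_param->state, &expected, AGE_FREE))
 *       ;   // still a candidate: it was never queued on the aged list
 *   else
 *       ;   // already aged out: unlink it from the aged list under
 *           // the spinlock, then store AGE_FREE
 */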
11927
11928 /**
11929  * Release an ASO age action.
11930  *
11931  * @param[in] dev
11932  *   Pointer to the Ethernet device structure.
11933  * @param[in] age_idx
11937  *   False if the release operation is during action destroy operation.
11938  *
11939  * @return
11940  *   0 when age action was removed, otherwise the number of references.
11941  */
11942 static int
11943 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11944 {
11945         struct mlx5_priv *priv = dev->data->dev_private;
11946         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11947         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11948         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11949
11950         if (!ret) {
11951                 flow_dv_aso_age_remove_from_age(dev, age);
11952                 rte_spinlock_lock(&mng->free_sl);
11953                 LIST_INSERT_HEAD(&mng->free, age, next);
11954                 rte_spinlock_unlock(&mng->free_sl);
11955         }
11956         return ret;
11957 }
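
/*
 * Usage sketch (hypothetical caller, not part of the driver):
 *
 *   if (flow_dv_aso_age_release(dev, age_idx) == 0) {
 *       // last reference dropped: the action was detached from the
 *       // aging lists and returned to the management free list
 *   }
 */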
11958
11959 /**
11960  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11961  *
11962  * @param[in] dev
11963  *   Pointer to the Ethernet device structure.
11964  *
11965  * @return
11966  *   0 on success, otherwise negative errno value and rte_errno is set.
11967  */
11968 static int
11969 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11970 {
11971         struct mlx5_priv *priv = dev->data->dev_private;
11972         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11973         void *old_pools = mng->pools;
11974         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11975         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11976         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11977
11978         if (!pools) {
11979                 rte_errno = ENOMEM;
11980                 return -ENOMEM;
11981         }
11982         if (old_pools) {
11983                 memcpy(pools, old_pools,
11984                        mng->n * sizeof(struct mlx5_aso_age_pool *));
11985                 mlx5_free(old_pools);
11986         } else {
11987                 /* First ASO flow hit allocation - starting ASO data-path. */
11988                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11989
11990                 if (ret) {
11991                         mlx5_free(pools);
11992                         return ret;
11993                 }
11994         }
11995         mng->n = resize;
11996         mng->pools = pools;
11997         return 0;
11998 }
11999
12000 /**
12001  * Create and initialize a new ASO aging pool.
12002  *
12003  * @param[in] dev
12004  *   Pointer to the Ethernet device structure.
12005  * @param[out] age_free
12006  *   Where to put the pointer of a new age action.
12007  *
12008  * @return
12009  *   The age actions pool pointer and @p age_free is set on success,
12010  *   NULL otherwise and rte_errno is set.
12011  */
12012 static struct mlx5_aso_age_pool *
12013 flow_dv_age_pool_create(struct rte_eth_dev *dev,
12014                         struct mlx5_aso_age_action **age_free)
12015 {
12016         struct mlx5_priv *priv = dev->data->dev_private;
12017         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12018         struct mlx5_aso_age_pool *pool = NULL;
12019         struct mlx5_devx_obj *obj = NULL;
12020         uint32_t i;
12021
12022         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
12023                                                     priv->sh->cdev->pdn);
12024         if (!obj) {
12025                 rte_errno = ENODATA;
12026                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
12027                 return NULL;
12028         }
12029         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12030         if (!pool) {
12031                 claim_zero(mlx5_devx_cmd_destroy(obj));
12032                 rte_errno = ENOMEM;
12033                 return NULL;
12034         }
12035         pool->flow_hit_aso_obj = obj;
12036         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
12037         rte_rwlock_write_lock(&mng->resize_rwl);
12038         pool->index = mng->next;
12039         /* Resize pools array if there is no room for the new pool in it. */
12040         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
12041                 claim_zero(mlx5_devx_cmd_destroy(obj));
12042                 mlx5_free(pool);
12043                 rte_rwlock_write_unlock(&mng->resize_rwl);
12044                 return NULL;
12045         }
12046         mng->pools[pool->index] = pool;
12047         mng->next++;
12048         rte_rwlock_write_unlock(&mng->resize_rwl);
12049         /* Assign the first action in the new pool, the rest go to free list. */
12050         *age_free = &pool->actions[0];
12051         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
12052                 pool->actions[i].offset = i;
12053                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
12054         }
12055         return pool;
12056 }
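
/*
 * Illustrative sketch (not part of the driver): every action stores its
 * in-pool offset, so an allocator holding only an action pointer can
 * recover the owning pool without extra bookkeeping:
 *
 *   struct mlx5_aso_age_action *act;   // e.g. taken from the free list
 *   // act - act->offset points at actions[0], the start of the array
 *   const struct mlx5_aso_age_pool *pool = container_of
 *       ((const struct mlx5_aso_age_action (*)
 *         [MLX5_ASO_AGE_ACTIONS_PER_POOL])(act - act->offset),
 *        const struct mlx5_aso_age_pool, actions);
 */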
12057
12058 /**
12059  * Allocate an ASO aging bit.
12060  *
12061  * @param[in] dev
12062  *   Pointer to the Ethernet device structure.
12063  * @param[out] error
12064  *   Pointer to the error structure.
12065  *
12066  * @return
12067  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
12068  */
12069 static uint32_t
12070 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12071 {
12072         struct mlx5_priv *priv = dev->data->dev_private;
12073         const struct mlx5_aso_age_pool *pool;
12074         struct mlx5_aso_age_action *age_free = NULL;
12075         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12076
12077         MLX5_ASSERT(mng);
12078         /* Try to get the next free age action bit. */
12079         rte_spinlock_lock(&mng->free_sl);
12080         age_free = LIST_FIRST(&mng->free);
12081         if (age_free) {
12082                 LIST_REMOVE(age_free, next);
12083         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
12084                 rte_spinlock_unlock(&mng->free_sl);
12085                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12086                                    NULL, "failed to create ASO age pool");
12087                 return 0; /* 0 is an error. */
12088         }
12089         rte_spinlock_unlock(&mng->free_sl);
12090         pool = container_of
12091           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
12092                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
12093                                                                        actions);
12094         if (!age_free->dr_action) {
12095                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
12096                                                  error);
12097
12098                 if (reg_c < 0) {
12099                         rte_flow_error_set(error, rte_errno,
12100                                            RTE_FLOW_ERROR_TYPE_ACTION,
12101                                            NULL, "failed to get reg_c "
12102                                            "for ASO flow hit");
12103                         return 0; /* 0 is an error. */
12104                 }
12105 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
12106                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
12107                                 (priv->sh->rx_domain,
12108                                  pool->flow_hit_aso_obj->obj, age_free->offset,
12109                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
12110                                  (reg_c - REG_C_0));
12111 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
12112                 if (!age_free->dr_action) {
12113                         rte_errno = errno;
12114                         rte_spinlock_lock(&mng->free_sl);
12115                         LIST_INSERT_HEAD(&mng->free, age_free, next);
12116                         rte_spinlock_unlock(&mng->free_sl);
12117                         rte_flow_error_set(error, rte_errno,
12118                                            RTE_FLOW_ERROR_TYPE_ACTION,
12119                                            NULL, "failed to create ASO "
12120                                            "flow hit action");
12121                         return 0; /* 0 is an error. */
12122                 }
12123         }
12124         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
12125         return pool->index | ((age_free->offset + 1) << 16);
12126 }
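
/*
 * Illustrative sketch (not part of the driver): the index returned above
 * packs the pool index into the low 16 bits and (offset + 1) into the
 * high bits, keeping 0 reserved as the error value. The matching decode,
 * with hypothetical names, would be:
 *
 *   uint32_t age_idx = flow_dv_aso_age_alloc(dev, &error);
 *   if (age_idx != 0) {
 *       uint16_t pool_idx = age_idx & 0xffff;
 *       uint16_t offset = (age_idx >> 16) - 1;
 *       // flow_aso_age_get_by_idx() is expected to do the inverse lookup
 *   }
 */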
12127
12128 /**
12129  * Initialize flow ASO age parameters.
12130  *
12131  * @param[in] dev
12132  *   Pointer to rte_eth_dev structure.
12133  * @param[in] age_idx
12134  *   Index of ASO age action.
12135  * @param[in] context
12136  *   Pointer to flow counter age context.
12137  * @param[in] timeout
12138  *   Aging timeout in seconds.
12140  */
12141 static void
12142 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
12143                             uint32_t age_idx,
12144                             void *context,
12145                             uint32_t timeout)
12146 {
12147         struct mlx5_aso_age_action *aso_age;
12148
12149         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
12150         MLX5_ASSERT(aso_age);
12151         aso_age->age_params.context = context;
12152         aso_age->age_params.timeout = timeout;
12153         aso_age->age_params.port_id = dev->data->port_id;
12154         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
12155                          __ATOMIC_RELAXED);
12156         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
12157                          __ATOMIC_RELAXED);
12158 }
12159
12160 static void
12161 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
12162                                const struct rte_flow_item_integrity *value,
12163                                void *headers_m, void *headers_v)
12164 {
12165         if (mask->l4_ok) {
12166                 /* The RTE l4_ok filter aggregates the hardware l4_ok
12167                  * and l4_checksum_ok filters.
12168                  * Positive RTE l4_ok match requires hardware match on both L4
12169                  * hardware integrity bits.
12170                  * For negative match, check hardware l4_checksum_ok bit only,
12171                  * because hardware sets that bit to 0 for all packets
12172                  * with bad L4.
12173                  */
12174                 if (value->l4_ok) {
12175                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1);
12176                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1);
12177                 }
12178                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12179                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12180                          !!value->l4_ok);
12181         }
12182         if (mask->l4_csum_ok) {
12183                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12184                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12185                          value->l4_csum_ok);
12186         }
12187 }
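
/*
 * Illustrative sketch (not part of the driver): the l4_ok translation
 * above reduces to the following matcher settings:
 *
 *   spec l4_ok == 1:  match l4_ok = 1 and l4_checksum_ok = 1
 *   spec l4_ok == 0:  match l4_checksum_ok = 0 only, since HW clears
 *                     l4_checksum_ok for every packet with a bad L4
 */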
12188
12189 static void
12190 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
12191                                const struct rte_flow_item_integrity *value,
12192                                void *headers_m, void *headers_v, bool is_ipv4)
12193 {
12194         if (mask->l3_ok) {
12195                 /* For IPv4, the RTE l3_ok filter aggregates the
12196                  * hardware l3_ok and ipv4_csum_ok filters.
12197                  * Positive RTE l3_ok match requires hardware match
12198                  * on both L3 hardware integrity bits.
12199                  * For negative match, check the hardware
12200                  * ipv4_checksum_ok bit only, because hardware sets
12201                  * that bit to 0 for all packets with bad L3.
12202                  */
12203                 if (is_ipv4) {
12204                         if (value->l3_ok) {
12205                                 MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12206                                          l3_ok, 1);
12207                                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12208                                          l3_ok, 1);
12209                         }
12210                         MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12211                                  ipv4_checksum_ok, 1);
12212                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12213                                  ipv4_checksum_ok, !!value->l3_ok);
12214                 } else {
12215                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1);
12216                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
12217                                  value->l3_ok);
12218                 }
12219         }
12220         if (mask->ipv4_csum_ok) {
12221                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1);
12222                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
12223                          value->ipv4_csum_ok);
12224         }
12225 }
12226
12227 static void
12228 set_integrity_bits(void *headers_m, void *headers_v,
12229                    const struct rte_flow_item *integrity_item, bool is_l3_ip4)
12230 {
12231         const struct rte_flow_item_integrity *spec = integrity_item->spec;
12232         const struct rte_flow_item_integrity *mask = integrity_item->mask;
12233
12234         /* Integrity bits validation already checked the spec pointer. */
12235         MLX5_ASSERT(spec != NULL);
12236         if (!mask)
12237                 mask = &rte_flow_item_integrity_mask;
12238         flow_dv_translate_integrity_l3(mask, spec, headers_m, headers_v,
12239                                        is_l3_ip4);
12240         flow_dv_translate_integrity_l4(mask, spec, headers_m, headers_v);
12241 }
12242
12243 static void
12244 flow_dv_translate_item_integrity_post(void *matcher, void *key,
12245                                       const
12246                                       struct rte_flow_item *integrity_items[2],
12247                                       uint64_t pattern_flags)
12248 {
12249         void *headers_m, *headers_v;
12250         bool is_l3_ip4;
12251
12252         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
12253                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12254                                          inner_headers);
12255                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
12256                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) !=
12257                             0;
12258                 set_integrity_bits(headers_m, headers_v,
12259                                    integrity_items[1], is_l3_ip4);
12260         }
12261         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
12262                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12263                                          outer_headers);
12264                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
12265                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) !=
12266                             0;
12267                 set_integrity_bits(headers_m, headers_v,
12268                                    integrity_items[0], is_l3_ip4);
12269         }
12270 }
12271
12272 static void
12273 flow_dv_translate_item_integrity(const struct rte_flow_item *item,
12274                                  const struct rte_flow_item *integrity_items[2],
12275                                  uint64_t *last_item)
12276 {
12277         const struct rte_flow_item_integrity *spec = (typeof(spec))item->spec;
12278
12279         /* Integrity bits validation already checked the spec pointer. */
12280         MLX5_ASSERT(spec != NULL);
12281         if (spec->level > 1) {
12282                 integrity_items[1] = item;
12283                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
12284         } else {
12285                 integrity_items[0] = item;
12286                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
12287         }
12288 }
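
/*
 * Usage sketch (application side, not part of the driver): an integrity
 * item with level > 1 targets the inner headers, any other level the
 * outer ones, e.g.:
 *
 *   struct rte_flow_item_integrity spec = {
 *       .level = 0,   // outer headers
 *       .l3_ok = 1,
 *       .l4_ok = 1,
 *   };
 *   struct rte_flow_item_integrity mask = {
 *       .l3_ok = 1,
 *       .l4_ok = 1,
 *   };
 *   struct rte_flow_item item = {
 *       .type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
 *       .spec = &spec,
 *       .mask = &mask,
 *   };
 */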
12289
12290 /**
12291  * Prepares DV flow counter with aging configuration.
12292  * Gets it by index when it exists, creates a new one when it doesn't.
12293  *
12294  * @param[in] dev
12295  *   Pointer to rte_eth_dev structure.
12296  * @param[in] dev_flow
12297  *   Pointer to the mlx5_flow.
12298  * @param[in, out] flow
12299  *   Pointer to the parent rte_flow.
12300  * @param[in] count
12301  *   Pointer to the counter action configuration.
12302  * @param[in] age
12303  *   Pointer to the aging action configuration.
12304  * @param[out] error
12305  *   Pointer to the error structure.
12306  *
12307  * @return
12308  *   Pointer to the counter, NULL otherwise.
12309  */
12310 static struct mlx5_flow_counter *
12311 flow_dv_prepare_counter(struct rte_eth_dev *dev,
12312                         struct mlx5_flow *dev_flow,
12313                         struct rte_flow *flow,
12314                         const struct rte_flow_action_count *count,
12315                         const struct rte_flow_action_age *age,
12316                         struct rte_flow_error *error)
12317 {
12318         if (!flow->counter) {
12319                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
12320                                                                  count, age);
12321                 if (!flow->counter) {
12322                         rte_flow_error_set(error, rte_errno,
12323                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12324                                            "cannot create counter object.");
12325                         return NULL;
12326                 }
12327         }
12328         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12329 }
12330
12331 /*
12332  * Release an ASO CT action via its owner device.
12333  *
12334  * @param[in] dev
12335  *   Pointer to the Ethernet device structure.
12336  * @param[in] idx
12337  *   Index of ASO CT action to release.
12338  *
12339  * @return
12340  *   0 when CT action was removed, otherwise the number of references.
12341  */
12342 static inline int
12343 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
12344 {
12345         struct mlx5_priv *priv = dev->data->dev_private;
12346         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12347         uint32_t ret;
12348         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12349         enum mlx5_aso_ct_state state =
12350                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
12351
12352         /* Cannot release when CT is in the ASO SQ. */
12353         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
12354                 return -1;
12355         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
12356         if (!ret) {
12357                 if (ct->dr_action_orig) {
12358 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12359                         claim_zero(mlx5_glue->destroy_flow_action
12360                                         (ct->dr_action_orig));
12361 #endif
12362                         ct->dr_action_orig = NULL;
12363                 }
12364                 if (ct->dr_action_rply) {
12365 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12366                         claim_zero(mlx5_glue->destroy_flow_action
12367                                         (ct->dr_action_rply));
12368 #endif
12369                         ct->dr_action_rply = NULL;
12370                 }
12371                 /* Clear the state to free; not needed for the 1st allocation. */
12372                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
12373                 rte_spinlock_lock(&mng->ct_sl);
12374                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
12375                 rte_spinlock_unlock(&mng->ct_sl);
12376         }
12377         return (int)ret;
12378 }
12379
12380 static inline int
12381 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx,
12382                        struct rte_flow_error *error)
12383 {
12384         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12385         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12386         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12387         int ret;
12388
12389         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12390         if (dev->data->dev_started != 1)
12391                 return rte_flow_error_set(error, EAGAIN,
12392                                           RTE_FLOW_ERROR_TYPE_ACTION,
12393                                           NULL,
12394                                           "Indirect CT action cannot be destroyed when the port is stopped");
12395         ret = flow_dv_aso_ct_dev_release(owndev, idx);
12396         if (ret < 0)
12397                 return rte_flow_error_set(error, EAGAIN,
12398                                           RTE_FLOW_ERROR_TYPE_ACTION,
12399                                           NULL,
12400                                           "Current state prevents indirect CT action from being destroyed");
12401         return ret;
12402 }
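
/*
 * Illustrative sketch (not part of the driver): an indirect CT handle
 * embeds its owner port, so a peer port can release the action through
 * the owner's pools:
 *
 *   uint16_t owner = MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
 *   uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
 *   flow_dv_aso_ct_dev_release(&rte_eth_devices[owner], idx);
 */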
12403
12404 /*
12405  * Resize the ASO CT pools array by 64 pools.
12406  *
12407  * @param[in] dev
12408  *   Pointer to the Ethernet device structure.
12409  *
12410  * @return
12411  *   0 on success, otherwise negative errno value and rte_errno is set.
12412  */
12413 static int
12414 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12415 {
12416         struct mlx5_priv *priv = dev->data->dev_private;
12417         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12418         void *old_pools = mng->pools;
12419         /* Magic number for now; should be replaced with a macro. */
12420         uint32_t resize = mng->n + 64;
12421         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12422         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12423
12424         if (!pools) {
12425                 rte_errno = ENOMEM;
12426                 return -rte_errno;
12427         }
12428         rte_rwlock_write_lock(&mng->resize_rwl);
12429         /* The ASO SQ/QP was already initialized at startup. */
12430         if (old_pools) {
12431                 /* Realloc could be an alternative choice. */
12432                 rte_memcpy(pools, old_pools,
12433                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
12434                 mlx5_free(old_pools);
12435         }
12436         mng->n = resize;
12437         mng->pools = pools;
12438         rte_rwlock_write_unlock(&mng->resize_rwl);
12439         return 0;
12440 }
12441
12442 /*
12443  * Create and initialize a new ASO CT pool.
12444  *
12445  * @param[in] dev
12446  *   Pointer to the Ethernet device structure.
12447  * @param[out] ct_free
12448  *   Where to put the pointer of a new CT action.
12449  *
12450  * @return
12451  *   The CT actions pool pointer and @p ct_free is set on success,
12452  *   NULL otherwise and rte_errno is set.
12453  */
12454 static struct mlx5_aso_ct_pool *
12455 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
12456                        struct mlx5_aso_ct_action **ct_free)
12457 {
12458         struct mlx5_priv *priv = dev->data->dev_private;
12459         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12460         struct mlx5_aso_ct_pool *pool = NULL;
12461         struct mlx5_devx_obj *obj = NULL;
12462         uint32_t i;
12463         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
12464
12465         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
12466                                                           priv->sh->cdev->pdn,
12467                                                           log_obj_size);
12468         if (!obj) {
12469                 rte_errno = ENODATA;
12470                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
12471                 return NULL;
12472         }
12473         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12474         if (!pool) {
12475                 rte_errno = ENOMEM;
12476                 claim_zero(mlx5_devx_cmd_destroy(obj));
12477                 return NULL;
12478         }
12479         pool->devx_obj = obj;
12480         pool->index = mng->next;
12481         /* Resize pools array if there is no room for the new pool in it. */
12482         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
12483                 claim_zero(mlx5_devx_cmd_destroy(obj));
12484                 mlx5_free(pool);
12485                 return NULL;
12486         }
12487         mng->pools[pool->index] = pool;
12488         mng->next++;
12489         /* Assign the first action in the new pool, the rest go to free list. */
12490         *ct_free = &pool->actions[0];
12491         /* The caller holds the lock, so the list operation is safe here. */
12492         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
12493                 /* refcnt is 0 when allocating the memory. */
12494                 pool->actions[i].offset = i;
12495                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
12496         }
12497         return pool;
12498 }
12499
12500 /*
12501  * Allocate an ASO CT action from the free list.
12502  *
12503  * @param[in] dev
12504  *   Pointer to the Ethernet device structure.
12505  * @param[out] error
12506  *   Pointer to the error structure.
12507  *
12508  * @return
12509  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
12510  */
12511 static uint32_t
12512 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12513 {
12514         struct mlx5_priv *priv = dev->data->dev_private;
12515         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12516         struct mlx5_aso_ct_action *ct = NULL;
12517         struct mlx5_aso_ct_pool *pool;
12518         uint8_t reg_c;
12519         uint32_t ct_idx;
12520
12521         MLX5_ASSERT(mng);
12522         if (!priv->sh->cdev->config.devx) {
12523                 rte_errno = ENOTSUP;
12524                 return 0;
12525         }
12526         /* Get a free CT action; if none, a new pool will be created. */
12527         rte_spinlock_lock(&mng->ct_sl);
12528         ct = LIST_FIRST(&mng->free_cts);
12529         if (ct) {
12530                 LIST_REMOVE(ct, next);
12531         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
12532                 rte_spinlock_unlock(&mng->ct_sl);
12533                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12534                                    NULL, "failed to create ASO CT pool");
12535                 return 0;
12536         }
12537         rte_spinlock_unlock(&mng->ct_sl);
12538         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
12539         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
12540         /* 0: inactive, 1: created, 2+: used by flows. */
12541         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
12542         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
12543         if (!ct->dr_action_orig) {
12544 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12545                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
12546                         (priv->sh->rx_domain, pool->devx_obj->obj,
12547                          ct->offset,
12548                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
12549                          reg_c - REG_C_0);
12550 #else
12551                 RTE_SET_USED(reg_c);
12552 #endif
12553                 if (!ct->dr_action_orig) {
12554                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12555                         rte_flow_error_set(error, rte_errno,
12556                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12557                                            "failed to create ASO CT action");
12558                         return 0;
12559                 }
12560         }
12561         if (!ct->dr_action_rply) {
12562 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12563                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
12564                         (priv->sh->rx_domain, pool->devx_obj->obj,
12565                          ct->offset,
12566                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
12567                          reg_c - REG_C_0);
12568 #endif
12569                 if (!ct->dr_action_rply) {
12570                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12571                         rte_flow_error_set(error, rte_errno,
12572                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12573                                            "failed to create ASO CT action");
12574                         return 0;
12575                 }
12576         }
12577         return ct_idx;
12578 }
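
/*
 * Illustrative sketch (not part of the driver): refcnt lifecycle of a
 * CT action as managed above:
 *
 *   0  - sitting on the free list (zeroed at pool allocation)
 *   1  - allocated and created, not yet referenced by any flow
 *   2+ - referenced by flows; flow_dv_aso_ct_dev_release() decrements
 *        the count and recycles the action once it reaches 0
 */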
12579
12580 /*
12581  * Create a conntrack object with context and actions using the ASO mechanism.
12582  *
12583  * @param[in] dev
12584  *   Pointer to rte_eth_dev structure.
12585  * @param[in] pro
12586  *   Pointer to conntrack information profile.
12587  * @param[out] error
12588  *   Pointer to the error structure.
12589  *
12590  * @return
12591  *   Index to conntrack object on success, 0 otherwise.
12592  */
12593 static uint32_t
12594 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12595                                    const struct rte_flow_action_conntrack *pro,
12596                                    struct rte_flow_error *error)
12597 {
12598         struct mlx5_priv *priv = dev->data->dev_private;
12599         struct mlx5_dev_ctx_shared *sh = priv->sh;
12600         struct mlx5_aso_ct_action *ct;
12601         uint32_t idx;
12602
12603         if (!sh->ct_aso_en)
12604                 return rte_flow_error_set(error, ENOTSUP,
12605                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12606                                           "Connection tracking is not supported");
12607         idx = flow_dv_aso_ct_alloc(dev, error);
12608         if (!idx)
12609                 return rte_flow_error_set(error, rte_errno,
12610                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12611                                           "Failed to allocate CT object");
12612         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12613         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12614                 return rte_flow_error_set(error, EBUSY,
12615                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12616                                           "Failed to update CT");
12617         ct->is_original = !!pro->is_original_dir;
12618         ct->peer = pro->peer_port;
12619         return idx;
12620 }
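
/*
 * Usage sketch (application side, not part of the driver): a minimal
 * conntrack profile handed to the indirect CT action; real profiles
 * also fill the TCP window/sequence fields:
 *
 *   struct rte_flow_action_conntrack pro = {
 *       .peer_port = peer_port_id,
 *       .is_original_dir = 1,
 *       .enable = 1,
 *       .state = RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
 *   };
 *   struct rte_flow_action action = {
 *       .type = RTE_FLOW_ACTION_TYPE_CONNTRACK,
 *       .conf = &pro,
 *   };
 */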
12621
12622 /**
12623  * Fill the flow with DV spec, lock free
12624  * (mutex should be acquired by caller).
12625  *
12626  * @param[in] dev
12627  *   Pointer to rte_eth_dev structure.
12628  * @param[in, out] dev_flow
12629  *   Pointer to the sub flow.
12630  * @param[in] attr
12631  *   Pointer to the flow attributes.
12632  * @param[in] items
12633  *   Pointer to the list of items.
12634  * @param[in] actions
12635  *   Pointer to the list of actions.
12636  * @param[out] error
12637  *   Pointer to the error structure.
12638  *
12639  * @return
12640  *   0 on success, a negative errno value otherwise and rte_errno is set.
12641  */
12642 static int
12643 flow_dv_translate(struct rte_eth_dev *dev,
12644                   struct mlx5_flow *dev_flow,
12645                   const struct rte_flow_attr *attr,
12646                   const struct rte_flow_item items[],
12647                   const struct rte_flow_action actions[],
12648                   struct rte_flow_error *error)
12649 {
12650         struct mlx5_priv *priv = dev->data->dev_private;
12651         struct mlx5_sh_config *dev_conf = &priv->sh->config;
12652         struct rte_flow *flow = dev_flow->flow;
12653         struct mlx5_flow_handle *handle = dev_flow->handle;
12654         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12655         struct mlx5_flow_rss_desc *rss_desc;
12656         uint64_t item_flags = 0;
12657         uint64_t last_item = 0;
12658         uint64_t action_flags = 0;
12659         struct mlx5_flow_dv_matcher matcher = {
12660                 .mask = {
12661                         .size = sizeof(matcher.mask.buf),
12662                 },
12663         };
12664         int actions_n = 0;
12665         bool actions_end = false;
12666         union {
12667                 struct mlx5_flow_dv_modify_hdr_resource res;
12668                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12669                             sizeof(struct mlx5_modification_cmd) *
12670                             (MLX5_MAX_MODIFY_NUM + 1)];
12671         } mhdr_dummy;
12672         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12673         const struct rte_flow_action_count *count = NULL;
12674         const struct rte_flow_action_age *non_shared_age = NULL;
12675         union flow_dv_attr flow_attr = { .attr = 0 };
12676         uint32_t tag_be;
12677         union mlx5_flow_tbl_key tbl_key;
12678         uint32_t modify_action_position = UINT32_MAX;
12679         void *match_mask = matcher.mask.buf;
12680         void *match_value = dev_flow->dv.value.buf;
12681         uint8_t next_protocol = 0xff;
12682         struct rte_vlan_hdr vlan = { 0 };
12683         struct mlx5_flow_dv_dest_array_resource mdest_res;
12684         struct mlx5_flow_dv_sample_resource sample_res;
12685         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12686         const struct rte_flow_action_sample *sample = NULL;
12687         struct mlx5_flow_sub_actions_list *sample_act;
12688         uint32_t sample_act_pos = UINT32_MAX;
12689         uint32_t age_act_pos = UINT32_MAX;
12690         uint32_t num_of_dest = 0;
12691         int tmp_actions_n = 0;
12692         uint32_t table;
12693         int ret = 0;
12694         const struct mlx5_flow_tunnel *tunnel = NULL;
12695         struct flow_grp_info grp_info = {
12696                 .external = !!dev_flow->external,
12697                 .transfer = !!attr->transfer,
12698                 .fdb_def_rule = !!priv->fdb_def_rule,
12699                 .skip_scale = dev_flow->skip_scale &
12700                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12701                 .std_tbl_fix = true,
12702         };
12703         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
12704         const struct rte_flow_item *tunnel_item = NULL;
12705
12706         if (!wks)
12707                 return rte_flow_error_set(error, ENOMEM,
12708                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12709                                           NULL,
12710                                           "failed to push flow workspace");
12711         rss_desc = &wks->rss_desc;
12712         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12713         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12714         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12715                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12716         /* Update normal path action resource at the last index of the array. */
12717         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12718         if (is_tunnel_offload_active(dev)) {
12719                 if (dev_flow->tunnel) {
12720                         RTE_VERIFY(dev_flow->tof_type ==
12721                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12722                         tunnel = dev_flow->tunnel;
12723                 } else {
12724                         tunnel = mlx5_get_tof(items, actions,
12725                                               &dev_flow->tof_type);
12726                         dev_flow->tunnel = tunnel;
12727                 }
12728                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12729                                         (dev, attr, tunnel, dev_flow->tof_type);
12730         }
12733         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12734                                        &grp_info, error);
12735         if (ret)
12736                 return ret;
12737         dev_flow->dv.group = table;
12738         if (attr->transfer)
12739                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12740         /* The number of actions must be set to 0 in case of a dirty stack. */
12741         mhdr_res->actions_num = 0;
12742         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12743                 /*
12744                  * Do not add a decap action if the match rule drops
12745                  * packets: HW rejects rules combining decap & drop.
12746                  *
12747                  * If a tunnel match rule was inserted before the matching
12748                  * tunnel set rule, the flow table used in the match rule
12749                  * must be registered. The current implementation handles
12750                  * that in flow_dv_match_register() at the function end.
12751                  */
12752                 bool add_decap = true;
12753                 const struct rte_flow_action *ptr = actions;
12754
12755                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12756                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12757                                 add_decap = false;
12758                                 break;
12759                         }
12760                 }
12761                 if (add_decap) {
12762                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12763                                                            attr->transfer,
12764                                                            error))
12765                                 return -rte_errno;
12766                         dev_flow->dv.actions[actions_n++] =
12767                                         dev_flow->dv.encap_decap->action;
12768                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12769                 }
12770         }
12771         for (; !actions_end ; actions++) {
12772                 const struct rte_flow_action_queue *queue;
12773                 const struct rte_flow_action_rss *rss;
12774                 const struct rte_flow_action *action = actions;
12775                 const uint8_t *rss_key;
12776                 struct mlx5_flow_tbl_resource *tbl;
12777                 struct mlx5_aso_age_action *age_act;
12778                 struct mlx5_flow_counter *cnt_act;
12779                 uint32_t port_id = 0;
12780                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12781                 int action_type = actions->type;
12782                 const struct rte_flow_action *found_action = NULL;
12783                 uint32_t jump_group = 0;
12784                 uint32_t owner_idx;
12785                 struct mlx5_aso_ct_action *ct;
12786
12787                 if (!mlx5_flow_os_action_supported(action_type))
12788                         return rte_flow_error_set(error, ENOTSUP,
12789                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12790                                                   actions,
12791                                                   "action not supported");
12792                 switch (action_type) {
12793                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12794                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12795                         break;
12796                 case RTE_FLOW_ACTION_TYPE_VOID:
12797                         break;
12798                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12799                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
12800                         if (flow_dv_translate_action_port_id(dev, action,
12801                                                              &port_id, error))
12802                                 return -rte_errno;
12803                         port_id_resource.port_id = port_id;
12804                         MLX5_ASSERT(!handle->rix_port_id_action);
12805                         if (flow_dv_port_id_action_resource_register
12806                             (dev, &port_id_resource, dev_flow, error))
12807                                 return -rte_errno;
12808                         dev_flow->dv.actions[actions_n++] =
12809                                         dev_flow->dv.port_id_action->action;
12810                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12811                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12812                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12813                         num_of_dest++;
12814                         break;
12815                 case RTE_FLOW_ACTION_TYPE_FLAG:
12816                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12817                         wks->mark = 1;
12818                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12819                                 struct rte_flow_action_mark mark = {
12820                                         .id = MLX5_FLOW_MARK_DEFAULT,
12821                                 };
12822
12823                                 if (flow_dv_convert_action_mark(dev, &mark,
12824                                                                 mhdr_res,
12825                                                                 error))
12826                                         return -rte_errno;
12827                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12828                                 break;
12829                         }
12830                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12831                         /*
12832                          * Only one FLAG or MARK is supported per device flow
12833                          * right now. So the pointer to the tag resource must be
12834                          * zero before the register process.
12835                          */
12836                         MLX5_ASSERT(!handle->dvh.rix_tag);
12837                         if (flow_dv_tag_resource_register(dev, tag_be,
12838                                                           dev_flow, error))
12839                                 return -rte_errno;
12840                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12841                         dev_flow->dv.actions[actions_n++] =
12842                                         dev_flow->dv.tag_resource->action;
12843                         break;
12844                 case RTE_FLOW_ACTION_TYPE_MARK:
12845                         action_flags |= MLX5_FLOW_ACTION_MARK;
12846                         wks->mark = 1;
12847                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12848                                 const struct rte_flow_action_mark *mark =
12849                                         (const struct rte_flow_action_mark *)
12850                                                 actions->conf;
12851
12852                                 if (flow_dv_convert_action_mark(dev, mark,
12853                                                                 mhdr_res,
12854                                                                 error))
12855                                         return -rte_errno;
12856                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12857                                 break;
12858                         }
12859                         /* Fall-through */
12860                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12861                         /* Legacy (non-extensive) MARK action. */
12862                         tag_be = mlx5_flow_mark_set
12863                               (((const struct rte_flow_action_mark *)
12864                                (actions->conf))->id);
12865                         MLX5_ASSERT(!handle->dvh.rix_tag);
12866                         if (flow_dv_tag_resource_register(dev, tag_be,
12867                                                           dev_flow, error))
12868                                 return -rte_errno;
12869                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12870                         dev_flow->dv.actions[actions_n++] =
12871                                         dev_flow->dv.tag_resource->action;
12872                         break;
12873                 case RTE_FLOW_ACTION_TYPE_SET_META:
12874                         if (flow_dv_convert_action_set_meta
12875                                 (dev, mhdr_res, attr,
12876                                  (const struct rte_flow_action_set_meta *)
12877                                   actions->conf, error))
12878                                 return -rte_errno;
12879                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12880                         break;
12881                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12882                         if (flow_dv_convert_action_set_tag
12883                                 (dev, mhdr_res,
12884                                  (const struct rte_flow_action_set_tag *)
12885                                   actions->conf, error))
12886                                 return -rte_errno;
12887                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12888                         break;
12889                 case RTE_FLOW_ACTION_TYPE_DROP:
12890                         action_flags |= MLX5_FLOW_ACTION_DROP;
12891                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12892                         break;
12893                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12894                         queue = actions->conf;
12895                         rss_desc->queue_num = 1;
12896                         rss_desc->queue[0] = queue->index;
12897                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12898                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12899                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12900                         num_of_dest++;
12901                         break;
12902                 case RTE_FLOW_ACTION_TYPE_RSS:
12903                         rss = actions->conf;
12904                         memcpy(rss_desc->queue, rss->queue,
12905                                rss->queue_num * sizeof(uint16_t));
12906                         rss_desc->queue_num = rss->queue_num;
12907                         /* NULL RSS key indicates default RSS key. */
12908                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12909                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12910                         /*
12911                          * rss->level and rss->types should be set in advance
12912                          * when expanding items for RSS.
12913                          */
12914                         action_flags |= MLX5_FLOW_ACTION_RSS;
12915                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12916                                 MLX5_FLOW_FATE_SHARED_RSS :
12917                                 MLX5_FLOW_FATE_QUEUE;
12918                         break;
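                              /*
                               * Illustrative application-side action this case
                               * consumes (hypothetical values; a NULL key selects
                               * the default RSS key above):
                               *
                               *   uint16_t queues[2] = { 0, 1 };
                               *   struct rte_flow_action_rss conf = {
                               *           .types = RTE_ETH_RSS_IP,
                               *           .key = NULL,
                               *           .queue_num = 2,
                               *           .queue = queues,
                               *   };
                               */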
12919                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12920                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12921                         age_act = flow_aso_age_get_by_idx(dev, owner_idx);
12922                         if (flow->age == 0) {
12923                                 flow->age = owner_idx;
12924                                 __atomic_fetch_add(&age_act->refcnt, 1,
12925                                                    __ATOMIC_RELAXED);
12926                         }
12927                         age_act_pos = actions_n++;
12928                         action_flags |= MLX5_FLOW_ACTION_AGE;
12929                         break;
12930                 case RTE_FLOW_ACTION_TYPE_AGE:
12931                         non_shared_age = action->conf;
12932                         age_act_pos = actions_n++;
12933                         action_flags |= MLX5_FLOW_ACTION_AGE;
12934                         break;
12935                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12936                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12937                         cnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,
12938                                                              NULL);
12939                         MLX5_ASSERT(cnt_act != NULL);
12940                         /*
12941                          * When creating a meter drop flow in the drop table,
12942                          * the counter should not overwrite the rte flow counter.
12943                          */
12944                         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
12945                             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
12946                                 dev_flow->dv.actions[actions_n++] =
12947                                                         cnt_act->action;
12948                         } else {
12949                                 if (flow->counter == 0) {
12950                                         flow->counter = owner_idx;
12951                                         __atomic_fetch_add
12952                                                 (&cnt_act->shared_info.refcnt,
12953                                                  1, __ATOMIC_RELAXED);
12954                                 }
12955                                 /* Save the information now; apply it later. */
12956                                 action_flags |= MLX5_FLOW_ACTION_COUNT;
12957                         }
12958                         break;
12959                 case RTE_FLOW_ACTION_TYPE_COUNT:
12960                         if (!priv->sh->cdev->config.devx) {
12961                                 return rte_flow_error_set
12962                                               (error, ENOTSUP,
12963                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12964                                                NULL,
12965                                                "count action not supported");
12966                         }
12967                         /* Save the information now; apply it later. */
12968                         count = action->conf;
12969                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12970                         break;
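                              /*
                               * Hedged sketch of the matching application-side
                               * configuration (hypothetical counter id):
                               *
                               *   struct rte_flow_action_count conf = { .id = 0 };
                               *
                               * The DevX capability checked above is required to
                               * allocate the underlying flow counter.
                               */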
12971                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12972                         dev_flow->dv.actions[actions_n++] =
12973                                                 priv->sh->pop_vlan_action;
12974                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12975                         break;
12976                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12977                         if (!(action_flags &
12978                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12979                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12980                         vlan.eth_proto = rte_be_to_cpu_16
12981                              ((((const struct rte_flow_action_of_push_vlan *)
12982                                                    actions->conf)->ethertype));
12983                         found_action = mlx5_flow_find_action
12984                                         (actions + 1,
12985                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12986                         if (found_action)
12987                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12988                         found_action = mlx5_flow_find_action
12989                                         (actions + 1,
12990                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12991                         if (found_action)
12992                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12993                         if (flow_dv_create_action_push_vlan
12994                                             (dev, attr, &vlan, dev_flow, error))
12995                                 return -rte_errno;
12996                         dev_flow->dv.actions[actions_n++] =
12997                                         dev_flow->dv.push_vlan_res->action;
12998                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12999                         break;
13000                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
13001                         /* The OF_PUSH_VLAN action already handled this one. */
13002                         MLX5_ASSERT(action_flags &
13003                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
13004                         break;
13005                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
13006                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
13007                                 break;
13008                         flow_dev_get_vlan_info_from_items(items, &vlan);
13009                         mlx5_update_vlan_vid_pcp(actions, &vlan);
13010                         /* With no VLAN push, this is a modify header action. */
13011                         if (flow_dv_convert_action_modify_vlan_vid
13012                                                 (mhdr_res, actions, error))
13013                                 return -rte_errno;
13014                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
13015                         break;
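                              /*
                               * Hedged example of the application-side action
                               * (hypothetical VID); without a preceding
                               * OF_PUSH_VLAN it takes the modify header path above:
                               *
                               *   struct rte_flow_action_of_set_vlan_vid conf = {
                               *           .vlan_vid = RTE_BE16(100),
                               *   };
                               */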
13016                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
13017                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
13018                         if (flow_dv_create_action_l2_encap(dev, actions,
13019                                                            dev_flow,
13020                                                            attr->transfer,
13021                                                            error))
13022                                 return -rte_errno;
13023                         dev_flow->dv.actions[actions_n++] =
13024                                         dev_flow->dv.encap_decap->action;
13025                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
13026                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13027                                 sample_act->action_flags |=
13028                                                         MLX5_FLOW_ACTION_ENCAP;
13029                         break;
13030                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
13031                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
13032                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
13033                                                            attr->transfer,
13034                                                            error))
13035                                 return -rte_errno;
13036                         dev_flow->dv.actions[actions_n++] =
13037                                         dev_flow->dv.encap_decap->action;
13038                         action_flags |= MLX5_FLOW_ACTION_DECAP;
13039                         break;
13040                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
13041                         /* Handle encap with preceding decap. */
13042                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
13043                                 if (flow_dv_create_action_raw_encap
13044                                         (dev, actions, dev_flow, attr, error))
13045                                         return -rte_errno;
13046                                 dev_flow->dv.actions[actions_n++] =
13047                                         dev_flow->dv.encap_decap->action;
13048                         } else {
13049                                 /* Handle encap without preceding decap. */
13050                                 if (flow_dv_create_action_l2_encap
13051                                     (dev, actions, dev_flow, attr->transfer,
13052                                      error))
13053                                         return -rte_errno;
13054                                 dev_flow->dv.actions[actions_n++] =
13055                                         dev_flow->dv.encap_decap->action;
13056                         }
13057                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
13058                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13059                                 sample_act->action_flags |=
13060                                                         MLX5_FLOW_ACTION_ENCAP;
13061                         break;
13062                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
13063                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
13064                                 ;
13065                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
13066                                 if (flow_dv_create_action_l2_decap
13067                                     (dev, dev_flow, attr->transfer, error))
13068                                         return -rte_errno;
13069                                 dev_flow->dv.actions[actions_n++] =
13070                                         dev_flow->dv.encap_decap->action;
13071                         }
13072                         /* If decap is followed by encap, handle it at encap. */
13073                         action_flags |= MLX5_FLOW_ACTION_DECAP;
13074                         break;
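                              /*
                               * A typical sequence handled by the two cases above
                               * (tunnel header rewrite) is, schematically:
                               *
                               *   { RTE_FLOW_ACTION_TYPE_RAW_DECAP, &decap_conf },
                               *   { RTE_FLOW_ACTION_TYPE_RAW_ENCAP, &encap_conf },
                               *
                               * with hypothetical decap_conf/encap_conf; here the
                               * RAW_DECAP case only records the DECAP flag and the
                               * following RAW_ENCAP case emits the combined action.
                               */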
13075                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
13076                         dev_flow->dv.actions[actions_n++] =
13077                                 (void *)(uintptr_t)action->conf;
13078                         action_flags |= MLX5_FLOW_ACTION_JUMP;
13079                         break;
13080                 case RTE_FLOW_ACTION_TYPE_JUMP:
13081                         jump_group = ((const struct rte_flow_action_jump *)
13082                                                         action->conf)->group;
13083                         grp_info.std_tbl_fix = 0;
13084                         if (dev_flow->skip_scale &
13085                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
13086                                 grp_info.skip_scale = 1;
13087                         else
13088                                 grp_info.skip_scale = 0;
13089                         ret = mlx5_flow_group_to_table(dev, tunnel,
13090                                                        jump_group,
13091                                                        &table,
13092                                                        &grp_info, error);
13093                         if (ret)
13094                                 return ret;
13095                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
13096                                                        attr->transfer,
13097                                                        !!dev_flow->external,
13098                                                        tunnel, jump_group, 0,
13099                                                        0, error);
13100                         if (!tbl)
13101                                 return rte_flow_error_set
13102                                                 (error, errno,
13103                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13104                                                  NULL,
13105                                                  "cannot create jump action.");
13106                         if (flow_dv_jump_tbl_resource_register
13107                             (dev, tbl, dev_flow, error)) {
13108                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
13109                                 return rte_flow_error_set
13110                                                 (error, errno,
13111                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13112                                                  NULL,
13113                                                  "cannot create jump action.");
13114                         }
13115                         dev_flow->dv.actions[actions_n++] =
13116                                         dev_flow->dv.jump->action;
13117                         action_flags |= MLX5_FLOW_ACTION_JUMP;
13118                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
13119                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
13120                         num_of_dest++;
13121                         break;
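                              /*
                               * Hedged application-side counterpart (hypothetical
                               * group number); the group is remapped to a device
                               * table by mlx5_flow_group_to_table() above:
                               *
                               *   struct rte_flow_action_jump conf = { .group = 1 };
                               */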
13122                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
13123                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
13124                         if (flow_dv_convert_action_modify_mac
13125                                         (mhdr_res, actions, error))
13126                                 return -rte_errno;
13127                         action_flags |= actions->type ==
13128                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
13129                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
13130                                         MLX5_FLOW_ACTION_SET_MAC_DST;
13131                         break;
13132                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
13133                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
13134                         if (flow_dv_convert_action_modify_ipv4
13135                                         (mhdr_res, actions, error))
13136                                 return -rte_errno;
13137                         action_flags |= actions->type ==
13138                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
13139                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
13140                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
13141                         break;
13142                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
13143                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
13144                         if (flow_dv_convert_action_modify_ipv6
13145                                         (mhdr_res, actions, error))
13146                                 return -rte_errno;
13147                         action_flags |= actions->type ==
13148                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
13149                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
13150                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
13151                         break;
13152                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
13153                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
13154                         if (flow_dv_convert_action_modify_tp
13155                                         (mhdr_res, actions, items,
13156                                          &flow_attr, dev_flow, !!(action_flags &
13157                                          MLX5_FLOW_ACTION_DECAP), error))
13158                                 return -rte_errno;
13159                         action_flags |= actions->type ==
13160                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
13161                                         MLX5_FLOW_ACTION_SET_TP_SRC :
13162                                         MLX5_FLOW_ACTION_SET_TP_DST;
13163                         break;
13164                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
13165                         if (flow_dv_convert_action_modify_dec_ttl
13166                                         (mhdr_res, items, &flow_attr, dev_flow,
13167                                          !!(action_flags &
13168                                          MLX5_FLOW_ACTION_DECAP), error))
13169                                 return -rte_errno;
13170                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
13171                         break;
13172                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
13173                         if (flow_dv_convert_action_modify_ttl
13174                                         (mhdr_res, actions, items, &flow_attr,
13175                                          dev_flow, !!(action_flags &
13176                                          MLX5_FLOW_ACTION_DECAP), error))
13177                                 return -rte_errno;
13178                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
13179                         break;
13180                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
13181                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
13182                         if (flow_dv_convert_action_modify_tcp_seq
13183                                         (mhdr_res, actions, error))
13184                                 return -rte_errno;
13185                         action_flags |= actions->type ==
13186                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
13187                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
13188                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
13189                         break;
13190
13191                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
13192                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
13193                         if (flow_dv_convert_action_modify_tcp_ack
13194                                         (mhdr_res, actions, error))
13195                                 return -rte_errno;
13196                         action_flags |= actions->type ==
13197                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
13198                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
13199                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
13200                         break;
13201                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
13202                         if (flow_dv_convert_action_set_reg
13203                                         (mhdr_res, actions, error))
13204                                 return -rte_errno;
13205                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13206                         break;
13207                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
13208                         if (flow_dv_convert_action_copy_mreg
13209                                         (dev, mhdr_res, actions, error))
13210                                 return -rte_errno;
13211                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13212                         break;
13213                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
13214                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
13215                         dev_flow->handle->fate_action =
13216                                         MLX5_FLOW_FATE_DEFAULT_MISS;
13217                         break;
13218                 case RTE_FLOW_ACTION_TYPE_METER:
13219                         if (!wks->fm)
13220                                 return rte_flow_error_set(error, rte_errno,
13221                                         RTE_FLOW_ERROR_TYPE_ACTION,
13222                                         NULL, "Failed to get meter in flow.");
13223                         /* Set the meter action. */
13224                         dev_flow->dv.actions[actions_n++] =
13225                                 wks->fm->meter_action;
13226                         action_flags |= MLX5_FLOW_ACTION_METER;
13227                         break;
13228                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
13229                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
13230                                                               actions, error))
13231                                 return -rte_errno;
13232                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
13233                         break;
13234                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
13235                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
13236                                                               actions, error))
13237                                 return -rte_errno;
13238                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
13239                         break;
13240                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
13241                         sample_act_pos = actions_n;
13242                         sample = (const struct rte_flow_action_sample *)
13243                                  action->conf;
13244                         actions_n++;
13245                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
13246                         /* Put the encap action into the group if used with a port id action. */
13247                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
13248                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
13249                                 sample_act->action_flags |=
13250                                                         MLX5_FLOW_ACTION_ENCAP;
13251                         break;
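                              /*
                               * Hedged example of the application-side sample
                               * action (hypothetical values); ratio 1 mirrors
                               * every packet and enables the multi-destination
                               * handling further below:
                               *
                               *   struct rte_flow_action_sample conf = {
                               *           .ratio = 1,
                               *           .actions = sample_sub_actions,
                               *   };
                               */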
13252                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13253                         if (flow_dv_convert_action_modify_field
13254                                         (dev, mhdr_res, actions, attr, error))
13255                                 return -rte_errno;
13256                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
13257                         break;
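                              /*
                               * Hedged example of a modify_field action this case
                               * converts (hypothetical fields and width): copy 16
                               * bits of metadata into a tag register:
                               *
                               *   struct rte_flow_action_modify_field conf = {
                               *           .operation = RTE_FLOW_MODIFY_SET,
                               *           .dst = { .field = RTE_FLOW_FIELD_TAG },
                               *           .src = { .field = RTE_FLOW_FIELD_META },
                               *           .width = 16,
                               *   };
                               */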
13258                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
13259                         owner_idx = (uint32_t)(uintptr_t)action->conf;
13260                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
13261                         if (!ct)
13262                                 return rte_flow_error_set(error, EINVAL,
13263                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13264                                                 NULL,
13265                                                 "Failed to get CT object.");
13266                         if (mlx5_aso_ct_available(priv->sh, ct))
13267                                 return rte_flow_error_set(error, rte_errno,
13268                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13269                                                 NULL,
13270                                                 "CT is unavailable.");
13271                         if (ct->is_original)
13272                                 dev_flow->dv.actions[actions_n] =
13273                                                         ct->dr_action_orig;
13274                         else
13275                                 dev_flow->dv.actions[actions_n] =
13276                                                         ct->dr_action_rply;
13277                         if (flow->ct == 0) {
13278                                 flow->indirect_type =
13279                                                 MLX5_INDIRECT_ACTION_TYPE_CT;
13280                                 flow->ct = owner_idx;
13281                                 __atomic_fetch_add(&ct->refcnt, 1,
13282                                                    __ATOMIC_RELAXED);
13283                         }
13284                         actions_n++;
13285                         action_flags |= MLX5_FLOW_ACTION_CT;
13286                         break;
13287                 case RTE_FLOW_ACTION_TYPE_END:
13288                         actions_end = true;
13289                         if (mhdr_res->actions_num) {
13290                                 /* Create the modify header action if needed. */
13291                                 if (flow_dv_modify_hdr_resource_register
13292                                         (dev, mhdr_res, dev_flow, error))
13293                                         return -rte_errno;
13294                                 dev_flow->dv.actions[modify_action_position] =
13295                                         handle->dvh.modify_hdr->action;
13296                         }
13297                         /*
13298                          * Handle the AGE and COUNT actions with a single HW
13299                          * counter when neither is shared.
13300                          */
13301                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
13302                                 if ((non_shared_age && count) ||
13303                                     !(priv->sh->flow_hit_aso_en &&
13304                                       (attr->group || attr->transfer))) {
13305                                         /* Create aging based on counters. */
13306                                         cnt_act = flow_dv_prepare_counter
13307                                                                 (dev, dev_flow,
13308                                                                  flow, count,
13309                                                                  non_shared_age,
13310                                                                  error);
13311                                         if (!cnt_act)
13312                                                 return -rte_errno;
13313                                         dev_flow->dv.actions[age_act_pos] =
13314                                                                 cnt_act->action;
13315                                         break;
13316                                 }
13317                                 if (!flow->age && non_shared_age) {
13318                                         flow->age = flow_dv_aso_age_alloc
13319                                                                 (dev, error);
13320                                         if (!flow->age)
13321                                                 return -rte_errno;
13322                                         flow_dv_aso_age_params_init
13323                                                     (dev, flow->age,
13324                                                      non_shared_age->context ?
13325                                                      non_shared_age->context :
13326                                                      (void *)(uintptr_t)
13327                                                      (dev_flow->flow_idx),
13328                                                      non_shared_age->timeout);
13329                                 }
13330                                 age_act = flow_aso_age_get_by_idx(dev,
13331                                                                   flow->age);
13332                                 dev_flow->dv.actions[age_act_pos] =
13333                                                              age_act->dr_action;
13334                         }
13335                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13336                                 /*
13337                                  * Create one count action, to be used
13338                                  * by all sub-flows.
13339                                  */
13340                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
13341                                                                   flow, count,
13342                                                                   NULL, error);
13343                                 if (!cnt_act)
13344                                         return -rte_errno;
13345                                 dev_flow->dv.actions[actions_n++] =
13346                                                                 cnt_act->action;
13347                         }
13348                 default:
13349                         break;
13350                 }
13351                 if (mhdr_res->actions_num &&
13352                     modify_action_position == UINT32_MAX)
13353                         modify_action_position = actions_n++;
13354         }
13355         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
13356                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
13357                 int item_type = items->type;
13358
13359                 if (!mlx5_flow_os_item_supported(item_type))
13360                         return rte_flow_error_set(error, ENOTSUP,
13361                                                   RTE_FLOW_ERROR_TYPE_ITEM,
13362                                                   NULL, "item not supported");
13363                 switch (item_type) {
13364                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
13365                         flow_dv_translate_item_port_id
13366                                 (dev, match_mask, match_value, items, attr);
13367                         last_item = MLX5_FLOW_ITEM_PORT_ID;
13368                         break;
13369                 case RTE_FLOW_ITEM_TYPE_ETH:
13370                         flow_dv_translate_item_eth(match_mask, match_value,
13371                                                    items, tunnel,
13372                                                    dev_flow->dv.group);
13373                         matcher.priority = action_flags &
13374                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
13375                                         !dev_flow->external ?
13376                                         MLX5_PRIORITY_MAP_L3 :
13377                                         MLX5_PRIORITY_MAP_L2;
13378                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13379                                              MLX5_FLOW_LAYER_OUTER_L2;
13380                         break;
13381                 case RTE_FLOW_ITEM_TYPE_VLAN:
13382                         flow_dv_translate_item_vlan(dev_flow,
13383                                                     match_mask, match_value,
13384                                                     items, tunnel,
13385                                                     dev_flow->dv.group);
13386                         matcher.priority = MLX5_PRIORITY_MAP_L2;
13387                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13388                                               MLX5_FLOW_LAYER_INNER_VLAN) :
13389                                              (MLX5_FLOW_LAYER_OUTER_L2 |
13390                                               MLX5_FLOW_LAYER_OUTER_VLAN);
13391                         break;
13392                 case RTE_FLOW_ITEM_TYPE_IPV4:
13393                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13394                                                   &item_flags, &tunnel);
13395                         flow_dv_translate_item_ipv4(match_mask, match_value,
13396                                                     items, tunnel,
13397                                                     dev_flow->dv.group);
13398                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13399                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13400                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
13401                         if (items->mask != NULL &&
13402                             ((const struct rte_flow_item_ipv4 *)
13403                              items->mask)->hdr.next_proto_id) {
13404                                 next_protocol =
13405                                         ((const struct rte_flow_item_ipv4 *)
13406                                          (items->spec))->hdr.next_proto_id;
13407                                 next_protocol &=
13408                                         ((const struct rte_flow_item_ipv4 *)
13409                                          (items->mask))->hdr.next_proto_id;
13410                         } else {
13411                                 /* Reset for inner layer. */
13412                                 next_protocol = 0xff;
13413                         }
13414                         break;
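                              /*
                               * Worked example of the narrowing above (hypothetical
                               * values): spec->hdr.next_proto_id = 6 (TCP) with
                               * mask->hdr.next_proto_id = 0xff yields
                               * next_protocol = 6 & 0xff = 6, which the tunnel
                               * checks on the following items rely on.
                               */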
13415                 case RTE_FLOW_ITEM_TYPE_IPV6:
13416                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13417                                                   &item_flags, &tunnel);
13418                         flow_dv_translate_item_ipv6(match_mask, match_value,
13419                                                     items, tunnel,
13420                                                     dev_flow->dv.group);
13421                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13422                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13423                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13424                         if (items->mask != NULL &&
13425                             ((const struct rte_flow_item_ipv6 *)
13426                              items->mask)->hdr.proto) {
13427                                 next_protocol =
13428                                         ((const struct rte_flow_item_ipv6 *)
13429                                          items->spec)->hdr.proto;
13430                                 next_protocol &=
13431                                         ((const struct rte_flow_item_ipv6 *)
13432                                          items->mask)->hdr.proto;
13433                         } else {
13434                                 /* Reset for inner layer. */
13435                                 next_protocol = 0xff;
13436                         }
13437                         break;
13438                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13439                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
13440                                                              match_value,
13441                                                              items, tunnel);
13442                         last_item = tunnel ?
13443                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13444                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13445                         if (items->mask != NULL &&
13446                             ((const struct rte_flow_item_ipv6_frag_ext *)
13447                              items->mask)->hdr.next_header) {
13448                                 next_protocol =
13449                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13450                                  items->spec)->hdr.next_header;
13451                                 next_protocol &=
13452                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13453                                  items->mask)->hdr.next_header;
13454                         } else {
13455                                 /* Reset for inner layer. */
13456                                 next_protocol = 0xff;
13457                         }
13458                         break;
13459                 case RTE_FLOW_ITEM_TYPE_TCP:
13460                         flow_dv_translate_item_tcp(match_mask, match_value,
13461                                                    items, tunnel);
13462                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13463                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13464                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
13465                         break;
13466                 case RTE_FLOW_ITEM_TYPE_UDP:
13467                         flow_dv_translate_item_udp(match_mask, match_value,
13468                                                    items, tunnel);
13469                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13470                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13471                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
13472                         break;
13473                 case RTE_FLOW_ITEM_TYPE_GRE:
13474                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13475                         last_item = MLX5_FLOW_LAYER_GRE;
13476                         tunnel_item = items;
13477                         break;
13478                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13479                         flow_dv_translate_item_gre_key(match_mask,
13480                                                        match_value, items);
13481                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
13482                         break;
13483                 case RTE_FLOW_ITEM_TYPE_NVGRE:
13484                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13485                         last_item = MLX5_FLOW_LAYER_GRE;
13486                         tunnel_item = items;
13487                         break;
13488                 case RTE_FLOW_ITEM_TYPE_VXLAN:
13489                         flow_dv_translate_item_vxlan(dev, attr,
13490                                                      match_mask, match_value,
13491                                                      items, tunnel);
13492                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13493                         last_item = MLX5_FLOW_LAYER_VXLAN;
13494                         break;
13495                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13496                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13497                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13498                         tunnel_item = items;
13499                         break;
13500                 case RTE_FLOW_ITEM_TYPE_GENEVE:
13501                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13502                         last_item = MLX5_FLOW_LAYER_GENEVE;
13503                         tunnel_item = items;
13504                         break;
13505                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13506                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13507                                                           match_value,
13508                                                           items, error);
13509                         if (ret)
13510                                 return rte_flow_error_set(error, -ret,
13511                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13512                                         "cannot create GENEVE TLV option");
13513                         flow->geneve_tlv_option = 1;
13514                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13515                         break;
13516                 case RTE_FLOW_ITEM_TYPE_MPLS:
13517                         flow_dv_translate_item_mpls(match_mask, match_value,
13518                                                     items, last_item, tunnel);
13519                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13520                         last_item = MLX5_FLOW_LAYER_MPLS;
13521                         break;
13522                 case RTE_FLOW_ITEM_TYPE_MARK:
13523                         flow_dv_translate_item_mark(dev, match_mask,
13524                                                     match_value, items);
13525                         last_item = MLX5_FLOW_ITEM_MARK;
13526                         break;
13527                 case RTE_FLOW_ITEM_TYPE_META:
13528                         flow_dv_translate_item_meta(dev, match_mask,
13529                                                     match_value, attr, items);
13530                         last_item = MLX5_FLOW_ITEM_METADATA;
13531                         break;
13532                 case RTE_FLOW_ITEM_TYPE_ICMP:
13533                         flow_dv_translate_item_icmp(match_mask, match_value,
13534                                                     items, tunnel);
13535                         last_item = MLX5_FLOW_LAYER_ICMP;
13536                         break;
13537                 case RTE_FLOW_ITEM_TYPE_ICMP6:
13538                         flow_dv_translate_item_icmp6(match_mask, match_value,
13539                                                       items, tunnel);
13540                         last_item = MLX5_FLOW_LAYER_ICMP6;
13541                         break;
13542                 case RTE_FLOW_ITEM_TYPE_TAG:
13543                         flow_dv_translate_item_tag(dev, match_mask,
13544                                                    match_value, items);
13545                         last_item = MLX5_FLOW_ITEM_TAG;
13546                         break;
13547                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13548                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
13549                                                         match_value, items);
13550                         last_item = MLX5_FLOW_ITEM_TAG;
13551                         break;
13552                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13553                         flow_dv_translate_item_tx_queue(dev, match_mask,
13554                                                         match_value,
13555                                                         items);
13556                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13557                         break;
13558                 case RTE_FLOW_ITEM_TYPE_GTP:
13559                         flow_dv_translate_item_gtp(match_mask, match_value,
13560                                                    items, tunnel);
13561                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13562                         last_item = MLX5_FLOW_LAYER_GTP;
13563                         break;
13564                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13565                         ret = flow_dv_translate_item_gtp_psc(match_mask,
13566                                                           match_value,
13567                                                           items);
13568                         if (ret)
13569                                 return rte_flow_error_set(error, -ret,
13570                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13571                                         "cannot create GTP PSC item");
13572                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
13573                         break;
13574                 case RTE_FLOW_ITEM_TYPE_ECPRI:
13575                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
13576                                 /* Create it only the first time it is used. */
13577                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
13578                                 if (ret)
13579                                         return rte_flow_error_set
13580                                                 (error, -ret,
13581                                                 RTE_FLOW_ERROR_TYPE_ITEM,
13582                                                 NULL,
13583                                                 "cannot create eCPRI parser");
13584                         }
13585                         flow_dv_translate_item_ecpri(dev, match_mask,
13586                                                      match_value, items,
13587                                                      last_item);
13588                         /* No other protocol should follow the eCPRI layer. */
13589                         last_item = MLX5_FLOW_LAYER_ECPRI;
13590                         break;
13591                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
13592                         flow_dv_translate_item_integrity(items, integrity_items,
13593                                                          &last_item);
13594                         break;
13595                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
13596                         flow_dv_translate_item_aso_ct(dev, match_mask,
13597                                                       match_value, items);
13598                         break;
13599                 case RTE_FLOW_ITEM_TYPE_FLEX:
13600                         flow_dv_translate_item_flex(dev, match_mask,
13601                                                     match_value, items,
13602                                                     dev_flow, tunnel != 0);
13603                         last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
13604                                     MLX5_FLOW_ITEM_OUTER_FLEX;
13605                         break;
13606                 default:
13607                         break;
13608                 }
13609                 item_flags |= last_item;
13610         }
13611         /*
13612          * When E-Switch mode is enabled, there are two cases where the
13613          * source port must be set manually.
13614          * The first is a NIC steering rule, and the second is an
13615          * E-Switch rule where no port_id item was found. In both cases
13616          * the source port is set according to the current port in use.
13617          */
13618         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) && priv->sh->esw_mode) {
13619                 if (flow_dv_translate_item_port_id(dev, match_mask,
13620                                                    match_value, NULL, attr))
13621                         return -rte_errno;
13622         }
13623         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
13624                 flow_dv_translate_item_integrity_post(match_mask, match_value,
13625                                                       integrity_items,
13626                                                       item_flags);
13627         }
13628         if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
13629                 flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
13630                                                  tunnel_item, item_flags);
13631         else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
13632                 flow_dv_translate_item_geneve(match_mask, match_value,
13633                                               tunnel_item, item_flags);
13634         else if (item_flags & MLX5_FLOW_LAYER_GRE) {
13635                 if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
13636                         flow_dv_translate_item_gre(match_mask, match_value,
13637                                                    tunnel_item, item_flags);
13638                 else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
13639                         flow_dv_translate_item_nvgre(match_mask, match_value,
13640                                                      tunnel_item, item_flags);
13641                 else
13642                         MLX5_ASSERT(false);
13643         }
13644 #ifdef RTE_LIBRTE_MLX5_DEBUG
13645         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13646                                               dev_flow->dv.value.buf));
13647 #endif
13648         /*
13649          * Layers may already be initialized from the prefix flow if this
13650          * dev_flow is the suffix flow.
13651          */
13652         handle->layers |= item_flags;
13653         if (action_flags & MLX5_FLOW_ACTION_RSS)
13654                 flow_dv_hashfields_set(dev_flow, rss_desc);
13655         /* If the sample action contains an RSS action, the Sample/Mirror
13656          * resource should be registered after the hash fields are updated.
13657          */
13658         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13659                 ret = flow_dv_translate_action_sample(dev,
13660                                                       sample,
13661                                                       dev_flow, attr,
13662                                                       &num_of_dest,
13663                                                       sample_actions,
13664                                                       &sample_res,
13665                                                       error);
13666                 if (ret < 0)
13667                         return ret;
13668                 ret = flow_dv_create_action_sample(dev,
13669                                                    dev_flow,
13670                                                    num_of_dest,
13671                                                    &sample_res,
13672                                                    &mdest_res,
13673                                                    sample_actions,
13674                                                    action_flags,
13675                                                    error);
13676                 if (ret < 0)
13677                         return rte_flow_error_set
13678                                                 (error, rte_errno,
13679                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13680                                                 NULL,
13681                                                 "cannot create sample action");
13682                 if (num_of_dest > 1) {
13683                         dev_flow->dv.actions[sample_act_pos] =
13684                         dev_flow->dv.dest_array_res->action;
13685                 } else {
13686                         dev_flow->dv.actions[sample_act_pos] =
13687                         dev_flow->dv.sample_res->verbs_action;
13688                 }
13689         }
13690         /*
13691          * For multiple destinations (sample action with ratio=1), the encap
13692          * action and port id action will be combined into a group action,
13693          * so the original actions must be removed from the flow and the
13694          * sample action used instead.
13695          */
13696         if (num_of_dest > 1 &&
13697             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13698                 int i;
13699                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13700
13701                 for (i = 0; i < actions_n; i++) {
13702                         if ((sample_act->dr_encap_action &&
13703                                 sample_act->dr_encap_action ==
13704                                 dev_flow->dv.actions[i]) ||
13705                                 (sample_act->dr_port_id_action &&
13706                                 sample_act->dr_port_id_action ==
13707                                 dev_flow->dv.actions[i]) ||
13708                                 (sample_act->dr_jump_action &&
13709                                 sample_act->dr_jump_action ==
13710                                 dev_flow->dv.actions[i]))
13711                                 continue;
13712                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13713                 }
13714                 memcpy((void *)dev_flow->dv.actions,
13715                                 (void *)temp_actions,
13716                                 tmp_actions_n * sizeof(void *));
13717                 actions_n = tmp_actions_n;
13718         }
13719         dev_flow->dv.actions_n = actions_n;
13720         dev_flow->act_flags = action_flags;
13721         if (wks->skip_matcher_reg)
13722                 return 0;
13723         /* Register matcher. */
13724         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13725                                     matcher.mask.size);
13726         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13727                                                      matcher.priority,
13728                                                      dev_flow->external);
13729         /*
13730          * When creating a meter drop flow in the drop table with the
13731          * original 5-tuple match, the matcher priority should be lower
13732          * than the mtr_id matcher.
13733          */
13734         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13735             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
13736             matcher.priority <= MLX5_REG_BITS)
13737                 matcher.priority += MLX5_REG_BITS;
13738         /* The reserved field does not need to be set to 0 here. */
13739         tbl_key.is_fdb = attr->transfer;
13740         tbl_key.is_egress = attr->egress;
13741         tbl_key.level = dev_flow->dv.group;
13742         tbl_key.id = dev_flow->dv.table_id;
13743         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13744                                      tunnel, attr->group, error))
13745                 return -rte_errno;
13746         return 0;
13747 }
13748
13749 /**
13750  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13751  * The tunnel bit (IBV_RX_HASH_INNER) is masked out, so the tunnel
13752  * and non-tunnel variants of the same hash fields share one slot.
13753  *
13754  * @param[in, out] action
13755  *   Shared RSS action holding hash RX queue objects.
13756  * @param[in] hash_fields
13757  *   Defines combination of packet fields to participate in RX hash.
13758  * @param[in] hrxq_idx
13759  *   Hash RX queue index to set.
13760  *
13761  * @return
13762  *   0 on success, otherwise a negative value (no matching hash
13763  *   fields combination).
13764  */
13765 static int
13766 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13767                               const uint64_t hash_fields,
13768                               uint32_t hrxq_idx)
13769 {
13770         uint32_t *hrxqs = action->hrxq;
13771
13772         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13773         case MLX5_RSS_HASH_IPV4:
13774                 /* fall-through. */
13775         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13776                 /* fall-through. */
13777         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13778                 hrxqs[0] = hrxq_idx;
13779                 return 0;
13780         case MLX5_RSS_HASH_IPV4_TCP:
13781                 /* fall-through. */
13782         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13783                 /* fall-through. */
13784         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13785                 hrxqs[1] = hrxq_idx;
13786                 return 0;
13787         case MLX5_RSS_HASH_IPV4_UDP:
13788                 /* fall-through. */
13789         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13790                 /* fall-through. */
13791         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13792                 hrxqs[2] = hrxq_idx;
13793                 return 0;
13794         case MLX5_RSS_HASH_IPV6:
13795                 /* fall-through. */
13796         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13797                 /* fall-through. */
13798         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13799                 hrxqs[3] = hrxq_idx;
13800                 return 0;
13801         case MLX5_RSS_HASH_IPV6_TCP:
13802                 /* fall-through. */
13803         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13804                 /* fall-through. */
13805         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13806                 hrxqs[4] = hrxq_idx;
13807                 return 0;
13808         case MLX5_RSS_HASH_IPV6_UDP:
13809                 /* fall-through. */
13810         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13811                 /* fall-through. */
13812         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13813                 hrxqs[5] = hrxq_idx;
13814                 return 0;
13815         case MLX5_RSS_HASH_NONE:
13816                 hrxqs[6] = hrxq_idx;
13817                 return 0;
13818         default:
13819                 return -1;
13820         }
13821 }
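
      /*
       * Hedged usage sketch (hypothetical variables): store the hash RX queue
       * created for the TCP/IPv4 hash fields of a shared RSS action:
       *
       *   if (__flow_dv_action_rss_hrxq_set(shared_rss, MLX5_RSS_HASH_IPV4_TCP,
       *                                     hrxq_idx))
       *           DRV_LOG(DEBUG, "unsupported hash fields combination");
       *
       * Because IBV_RX_HASH_INNER is masked out, the tunnel and non-tunnel
       * variants of the same hash fields share one slot.
       */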
13822
13823 /**
13824  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13825  * The tunnel bit (IBV_RX_HASH_INNER) is masked out, so the tunnel
13826  * and non-tunnel variants of the same hash fields share one slot.
13827  *
13828  * @param[in] dev
13829  *   Pointer to the Ethernet device structure.
13830  * @param[in] idx
13831  *   Shared RSS action ID holding hash RX queue objects.
13832  * @param[in] hash_fields
13833  *   Defines combination of packet fields to participate in RX hash.
13834  *
13835  * @return
13836  *   Valid hash RX queue index, otherwise 0 (no matching hash
13837  *   fields combination or missing hash RX queue).
13838  */
13839 static uint32_t
13840 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13841                                  const uint64_t hash_fields)
13842 {
13843         struct mlx5_priv *priv = dev->data->dev_private;
13844         struct mlx5_shared_action_rss *shared_rss =
13845             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13846         const uint32_t *hrxqs = shared_rss->hrxq;
13847
13848         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13849         case MLX5_RSS_HASH_IPV4:
13850                 /* fall-through. */
13851         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13852                 /* fall-through. */
13853         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13854                 return hrxqs[0];
13855         case MLX5_RSS_HASH_IPV4_TCP:
13856                 /* fall-through. */
13857         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13858                 /* fall-through. */
13859         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13860                 return hrxqs[1];
13861         case MLX5_RSS_HASH_IPV4_UDP:
13862                 /* fall-through. */
13863         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13864                 /* fall-through. */
13865         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13866                 return hrxqs[2];
13867         case MLX5_RSS_HASH_IPV6:
13868                 /* fall-through. */
13869         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13870                 /* fall-through. */
13871         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13872                 return hrxqs[3];
13873         case MLX5_RSS_HASH_IPV6_TCP:
13874                 /* fall-through. */
13875         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13876                 /* fall-through. */
13877         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13878                 return hrxqs[4];
13879         case MLX5_RSS_HASH_IPV6_UDP:
13880                 /* fall-through. */
13881         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13882                 /* fall-through. */
13883         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13884                 return hrxqs[5];
13885         case MLX5_RSS_HASH_NONE:
13886                 return hrxqs[6];
13887         default:
13888                 return 0;
13889         }
13891 }
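
/*
 * Minimal usage sketch, mirroring the shared-RSS branch of flow_dv_apply()
 * below: the hash fields are first translated into a hash RX queue index,
 * which is then resolved through the HRXQ indexed pool:
 *
 *     uint32_t hrxq_idx = __flow_dv_action_rss_hrxq_lookup
 *                             (dev, rss_desc->shared_rss,
 *                              dev_flow->hash_fields);
 *     struct mlx5_hrxq *hrxq = hrxq_idx ?
 *             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx) :
 *             NULL;
 *
 * A zero index means no hash RX queue was pre-created for the combination.
 */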
13892
13893 /**
13894  * Apply the flow to the NIC. Lock free
13895  * (the mutex should be acquired by the caller).
13896  *
13897  * @param[in] dev
13898  *   Pointer to the Ethernet device structure.
13899  * @param[in, out] flow
13900  *   Pointer to flow structure.
13901  * @param[out] error
13902  *   Pointer to error structure.
13903  *
13904  * @return
13905  *   0 on success, a negative errno value otherwise and rte_errno is set.
13906  */
13907 static int
13908 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13909               struct rte_flow_error *error)
13910 {
13911         struct mlx5_flow_dv_workspace *dv;
13912         struct mlx5_flow_handle *dh;
13913         struct mlx5_flow_handle_dv *dv_h;
13914         struct mlx5_flow *dev_flow;
13915         struct mlx5_priv *priv = dev->data->dev_private;
13916         uint32_t handle_idx;
13917         int n;
13918         int err;
13919         int idx;
13920         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13921         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13922         uint8_t misc_mask;
13923
13924         MLX5_ASSERT(wks);
13925         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13926                 dev_flow = &wks->flows[idx];
13927                 dv = &dev_flow->dv;
13928                 dh = dev_flow->handle;
13929                 dv_h = &dh->dvh;
13930                 n = dv->actions_n;
13931                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13932                         if (dv->transfer) {
13933                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13934                                 dv->actions[n++] = priv->sh->dr_drop_action;
13935                         } else {
13936 #ifdef HAVE_MLX5DV_DR
13937                                 /* DR supports drop action placeholder. */
13938                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13939                                 dv->actions[n++] = dv->group ?
13940                                         priv->sh->dr_drop_action :
13941                                         priv->root_drop_action;
13942 #else
13943                                 /* For DV we use the explicit drop queue. */
13944                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13945                                 dv->actions[n++] =
13946                                                 priv->drop_queue.hrxq->action;
13947 #endif
13948                         }
13949                 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13950                            !dv_h->rix_sample && !dv_h->rix_dest_array) {
13951                         struct mlx5_hrxq *hrxq;
13952                         uint32_t hrxq_idx;
13953
13954                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13955                                                     &hrxq_idx);
13956                         if (!hrxq) {
13957                                 rte_flow_error_set
13958                                         (error, rte_errno,
13959                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13960                                          "cannot get hash queue");
13961                                 goto error;
13962                         }
13963                         dh->rix_hrxq = hrxq_idx;
13964                         dv->actions[n++] = hrxq->action;
13965                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13966                         struct mlx5_hrxq *hrxq = NULL;
13967                         uint32_t hrxq_idx;
13968
13969                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13970                                                 rss_desc->shared_rss,
13971                                                 dev_flow->hash_fields);
13972                         if (hrxq_idx)
13973                                 hrxq = mlx5_ipool_get
13974                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13975                                          hrxq_idx);
13976                         if (!hrxq) {
13977                                 rte_flow_error_set
13978                                         (error, rte_errno,
13979                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13980                                          "cannot get hash queue");
13981                                 goto error;
13982                         }
13983                         dh->rix_srss = rss_desc->shared_rss;
13984                         dv->actions[n++] = hrxq->action;
13985                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13986                         if (!priv->sh->default_miss_action) {
13987                                 rte_flow_error_set
13988                                         (error, rte_errno,
13989                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13990                                          "default miss action was not created.");
13991                                 goto error;
13992                         }
13993                         dv->actions[n++] = priv->sh->default_miss_action;
13994                 }
13995                 misc_mask = flow_dv_matcher_enable(dv->value.buf);
13996                 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
13997                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13998                                                (void *)&dv->value, n,
13999                                                dv->actions, &dh->drv_flow);
14000                 if (err) {
14001                         rte_flow_error_set
14002                                 (error, errno,
14003                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14004                                 NULL,
14005                                 (!priv->sh->config.allow_duplicate_pattern &&
14006                                 errno == EEXIST) ?
14007                                 "duplicating pattern is not allowed" :
14008                                 "hardware refuses to create flow");
14009                         goto error;
14010                 }
14011                 if (priv->vmwa_context &&
14012                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
14013                         /*
14014                          * The rule contains the VLAN pattern.
14015                          * For VF we are going to create a VLAN
14016                          * interface to make the hypervisor set the
14017                          * correct e-Switch vport context.
14018                          */
14019                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
14020                 }
14021         }
14022         return 0;
14023 error:
14024         err = rte_errno; /* Save rte_errno before cleanup. */
14025         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
14026                        handle_idx, dh, next) {
14027                 /* hrxq is a union; don't clear it if the flag is not set. */
14028                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
14029                         mlx5_hrxq_release(dev, dh->rix_hrxq);
14030                         dh->rix_hrxq = 0;
14031                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
14032                         dh->rix_srss = 0;
14033                 }
14034                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14035                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14036         }
14037         rte_errno = err; /* Restore rte_errno. */
14038         return -rte_errno;
14039 }
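
/*
 * The error path above follows the usual PMD unwind idiom: rte_errno is
 * saved before the cleanup calls, which may themselves overwrite it, and
 * restored afterwards. Reduced to a sketch (cleanup_handles() is a
 * hypothetical stand-in for the SILIST_FOREACH() loop):
 *
 *     error:
 *             err = rte_errno;            (save the original cause)
 *             cleanup_handles(dev, flow); (may clobber rte_errno)
 *             rte_errno = err;            (restore it)
 *             return -rte_errno;
 */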
14040
14041 void
14042 flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
14043                           struct mlx5_list_entry *entry)
14044 {
14045         struct mlx5_flow_dv_matcher *resource = container_of(entry,
14046                                                              typeof(*resource),
14047                                                              entry);
14048
14049         claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
14050         mlx5_free(resource);
14051 }
14052
14053 /**
14054  * Release the flow matcher.
14055  *
14056  * @param dev
14057  *   Pointer to Ethernet device.
14058  * @param handle
14059  *   Pointer to mlx5_flow_handle.
14060  *
14061  * @return
14062  *   1 while a reference on it exists, 0 when freed.
14063  */
14064 static int
14065 flow_dv_matcher_release(struct rte_eth_dev *dev,
14066                         struct mlx5_flow_handle *handle)
14067 {
14068         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
14069         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
14070                                                             typeof(*tbl), tbl);
14071         int ret;
14072
14073         MLX5_ASSERT(matcher->matcher_object);
14074         ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
14075         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
14076         return ret;
14077 }
14078
14079 void
14080 flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14081 {
14082         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14083         struct mlx5_flow_dv_encap_decap_resource *res =
14084                                        container_of(entry, typeof(*res), entry);
14085
14086         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14087         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
14088 }
14089
14090 /**
14091  * Release an encap/decap resource.
14092  *
14093  * @param dev
14094  *   Pointer to Ethernet device.
14095  * @param encap_decap_idx
14096  *   Index of the encap/decap resource.
14097  *
14098  * @return
14099  *   1 while a reference on it exists, 0 when freed.
14100  */
14101 static int
14102 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
14103                                      uint32_t encap_decap_idx)
14104 {
14105         struct mlx5_priv *priv = dev->data->dev_private;
14106         struct mlx5_flow_dv_encap_decap_resource *resource;
14107
14108         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
14109                                   encap_decap_idx);
14110         if (!resource)
14111                 return 0;
14112         MLX5_ASSERT(resource->action);
14113         return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
14114 }
14115
14116 /**
14117  * Release a jump-to-table action resource.
14118  *
14119  * @param dev
14120  *   Pointer to Ethernet device.
14121  * @param rix_jump
14122  *   Index to the jump action resource.
14123  *
14124  * @return
14125  *   1 while a reference on it exists, 0 when freed.
14126  */
14127 static int
14128 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
14129                                   uint32_t rix_jump)
14130 {
14131         struct mlx5_priv *priv = dev->data->dev_private;
14132         struct mlx5_flow_tbl_data_entry *tbl_data;
14133
14134         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
14135                                   rix_jump);
14136         if (!tbl_data)
14137                 return 0;
14138         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
14139 }
14140
14141 void
14142 flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14143 {
14144         struct mlx5_flow_dv_modify_hdr_resource *res =
14145                 container_of(entry, typeof(*res), entry);
14146         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14147
14148         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14149         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
14150 }
14151
14152 /**
14153  * Release a modify-header resource.
14154  *
14155  * @param dev
14156  *   Pointer to Ethernet device.
14157  * @param handle
14158  *   Pointer to mlx5_flow_handle.
14159  *
14160  * @return
14161  *   1 while a reference on it exists, 0 when freed.
14162  */
14163 static int
14164 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
14165                                     struct mlx5_flow_handle *handle)
14166 {
14167         struct mlx5_priv *priv = dev->data->dev_private;
14168         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
14169
14170         MLX5_ASSERT(entry->action);
14171         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
14172 }
14173
14174 void
14175 flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14176 {
14177         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14178         struct mlx5_flow_dv_port_id_action_resource *resource =
14179                                   container_of(entry, typeof(*resource), entry);
14180
14181         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14182         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
14183 }
14184
14185 /**
14186  * Release port ID action resource.
14187  *
14188  * @param dev
14189  *   Pointer to Ethernet device.
14190  * @param port_id
14191  *   Index to the port ID action resource.
14192  *
14193  * @return
14194  *   1 while a reference on it exists, 0 when freed.
14195  */
14196 static int
14197 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
14198                                         uint32_t port_id)
14199 {
14200         struct mlx5_priv *priv = dev->data->dev_private;
14201         struct mlx5_flow_dv_port_id_action_resource *resource;
14202
14203         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
14204         if (!resource)
14205                 return 0;
14206         MLX5_ASSERT(resource->action);
14207         return mlx5_list_unregister(priv->sh->port_id_action_list,
14208                                     &resource->entry);
14209 }
14210
14211 /**
14212  * Release shared RSS action resource.
14213  *
14214  * @param dev
14215  *   Pointer to Ethernet device.
14216  * @param srss
14217  *   Shared RSS action index.
14218  */
14219 static void
14220 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
14221 {
14222         struct mlx5_priv *priv = dev->data->dev_private;
14223         struct mlx5_shared_action_rss *shared_rss;
14224
14225         shared_rss = mlx5_ipool_get
14226                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
14227         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14228 }
14229
14230 void
14231 flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14232 {
14233         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14234         struct mlx5_flow_dv_push_vlan_action_resource *resource =
14235                         container_of(entry, typeof(*resource), entry);
14236
14237         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14238         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
14239 }
14240
14241 /**
14242  * Release push vlan action resource.
14243  *
14244  * @param dev
14245  *   Pointer to Ethernet device.
14246  * @param handle
14247  *   Pointer to mlx5_flow_handle.
14248  *
14249  * @return
14250  *   1 while a reference on it exists, 0 when freed.
14251  */
14252 static int
14253 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
14254                                           struct mlx5_flow_handle *handle)
14255 {
14256         struct mlx5_priv *priv = dev->data->dev_private;
14257         struct mlx5_flow_dv_push_vlan_action_resource *resource;
14258         uint32_t idx = handle->dvh.rix_push_vlan;
14259
14260         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
14261         if (!resource)
14262                 return 0;
14263         MLX5_ASSERT(resource->action);
14264         return mlx5_list_unregister(priv->sh->push_vlan_action_list,
14265                                     &resource->entry);
14266 }
14267
14268 /**
14269  * Release the fate resource.
14270  *
14271  * @param dev
14272  *   Pointer to Ethernet device.
14273  * @param handle
14274  *   Pointer to mlx5_flow_handle.
14275  */
14276 static void
14277 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
14278                                struct mlx5_flow_handle *handle)
14279 {
14280         if (!handle->rix_fate)
14281                 return;
14282         switch (handle->fate_action) {
14283         case MLX5_FLOW_FATE_QUEUE:
14284                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
14285                         mlx5_hrxq_release(dev, handle->rix_hrxq);
14286                 break;
14287         case MLX5_FLOW_FATE_JUMP:
14288                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
14289                 break;
14290         case MLX5_FLOW_FATE_PORT_ID:
14291                 flow_dv_port_id_action_resource_release(dev,
14292                                 handle->rix_port_id_action);
14293                 break;
14294         default:
14295                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
14296                 break;
14297         }
14298         handle->rix_fate = 0;
14299 }
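
/*
 * The rix_* indexes dispatched above alias the same storage: rix_fate is
 * a generic view of a union whose active member is selected by
 * fate_action (flow_dv_apply() relies on the same invariant in its error
 * path). A reduced model of the handle layout, for illustration only:
 *
 *     union {
 *             uint32_t rix_hrxq;            (MLX5_FLOW_FATE_QUEUE)
 *             uint32_t rix_jump;            (MLX5_FLOW_FATE_JUMP)
 *             uint32_t rix_port_id_action;  (MLX5_FLOW_FATE_PORT_ID)
 *             uint32_t rix_fate;            (generic view)
 *     };
 *
 * Clearing rix_fate at the end therefore resets whichever member was
 * active.
 */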
14300
14301 void
14302 flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
14303                          struct mlx5_list_entry *entry)
14304 {
14305         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
14306                                                               typeof(*resource),
14307                                                               entry);
14308         struct rte_eth_dev *dev = resource->dev;
14309         struct mlx5_priv *priv = dev->data->dev_private;
14310
14311         if (resource->verbs_action)
14312                 claim_zero(mlx5_flow_os_destroy_flow_action
14313                                                       (resource->verbs_action));
14314         if (resource->normal_path_tbl)
14315                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14316                                              resource->normal_path_tbl);
14317         flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
14318         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
14319         DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
14320 }
14321
14322 /**
14323  * Release a sample resource.
14324  *
14325  * @param dev
14326  *   Pointer to Ethernet device.
14327  * @param handle
14328  *   Pointer to mlx5_flow_handle.
14329  *
14330  * @return
14331  *   1 while a reference on it exists, 0 when freed.
14332  */
14333 static int
14334 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
14335                                      struct mlx5_flow_handle *handle)
14336 {
14337         struct mlx5_priv *priv = dev->data->dev_private;
14338         struct mlx5_flow_dv_sample_resource *resource;
14339
14340         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
14341                                   handle->dvh.rix_sample);
14342         if (!resource)
14343                 return 0;
14344         MLX5_ASSERT(resource->verbs_action);
14345         return mlx5_list_unregister(priv->sh->sample_action_list,
14346                                     &resource->entry);
14347 }
14348
14349 void
14350 flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
14351                              struct mlx5_list_entry *entry)
14352 {
14353         struct mlx5_flow_dv_dest_array_resource *resource =
14354                         container_of(entry, typeof(*resource), entry);
14355         struct rte_eth_dev *dev = resource->dev;
14356         struct mlx5_priv *priv = dev->data->dev_private;
14357         uint32_t i = 0;
14358
14359         MLX5_ASSERT(resource->action);
14360         if (resource->action)
14361                 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14362         for (; i < resource->num_of_dest; i++)
14363                 flow_dv_sample_sub_actions_release(dev,
14364                                                    &resource->sample_idx[i]);
14365         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
14366         DRV_LOG(DEBUG, "destination array resource %p: removed",
14367                 (void *)resource);
14368 }
14369
14370 /**
14371  * Release a destination array resource.
14372  *
14373  * @param dev
14374  *   Pointer to Ethernet device.
14375  * @param handle
14376  *   Pointer to mlx5_flow_handle.
14377  *
14378  * @return
14379  *   1 while a reference on it exists, 0 when freed.
14380  */
14381 static int
14382 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14383                                     struct mlx5_flow_handle *handle)
14384 {
14385         struct mlx5_priv *priv = dev->data->dev_private;
14386         struct mlx5_flow_dv_dest_array_resource *resource;
14387
14388         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14389                                   handle->dvh.rix_dest_array);
14390         if (!resource)
14391                 return 0;
14392         MLX5_ASSERT(resource->action);
14393         return mlx5_list_unregister(priv->sh->dest_array_list,
14394                                     &resource->entry);
14395 }
14396
14397 static void
14398 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
14399 {
14400         struct mlx5_priv *priv = dev->data->dev_private;
14401         struct mlx5_dev_ctx_shared *sh = priv->sh;
14402         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
14403                                 sh->geneve_tlv_option_resource;
14404         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
14405         if (geneve_opt_resource) {
14406                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
14407                                          __ATOMIC_RELAXED))) {
14408                         claim_zero(mlx5_devx_cmd_destroy
14409                                         (geneve_opt_resource->obj));
14410                         mlx5_free(sh->geneve_tlv_option_resource);
14411                         sh->geneve_tlv_option_resource = NULL;
14412                 }
14413         }
14414         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
14415 }
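
/*
 * Skeleton of the release pattern used here: both the reference-count
 * decrement and the potential destroy run under the same spinlock, so a
 * concurrent creator can never observe a half-torn-down singleton object:
 *
 *     rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
 *     if (res && !__atomic_sub_fetch(&res->refcnt, 1, __ATOMIC_RELAXED)) {
 *             claim_zero(mlx5_devx_cmd_destroy(res->obj));
 *             mlx5_free(res);
 *             sh->geneve_tlv_option_resource = NULL;
 *     }
 *     rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
 */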
14416
14417 /**
14418  * Remove the flow from the NIC but keep it in memory.
14419  * Lock free (the mutex should be acquired by the caller).
14420  *
14421  * @param[in] dev
14422  *   Pointer to Ethernet device.
14423  * @param[in, out] flow
14424  *   Pointer to flow structure.
14425  */
14426 static void
14427 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14428 {
14429         struct mlx5_flow_handle *dh;
14430         uint32_t handle_idx;
14431         struct mlx5_priv *priv = dev->data->dev_private;
14432
14433         if (!flow)
14434                 return;
14435         handle_idx = flow->dev_handles;
14436         while (handle_idx) {
14437                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14438                                     handle_idx);
14439                 if (!dh)
14440                         return;
14441                 if (dh->drv_flow) {
14442                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14443                         dh->drv_flow = NULL;
14444                 }
14445                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14446                         flow_dv_fate_resource_release(dev, dh);
14447                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14448                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14449                 handle_idx = dh->next.next;
14450         }
14451 }
14452
14453 /**
14454  * Remove the flow from the NIC and the memory.
14455  * Lock free (the mutex should be acquired by the caller).
14456  *
14457  * @param[in] dev
14458  *   Pointer to the Ethernet device structure.
14459  * @param[in, out] flow
14460  *   Pointer to flow structure.
14461  */
14462 static void
14463 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
14464 {
14465         struct mlx5_flow_handle *dev_handle;
14466         struct mlx5_priv *priv = dev->data->dev_private;
14467         struct mlx5_flow_meter_info *fm = NULL;
14468         uint32_t srss = 0;
14469
14470         if (!flow)
14471                 return;
14472         flow_dv_remove(dev, flow);
14473         if (flow->counter) {
14474                 flow_dv_counter_free(dev, flow->counter);
14475                 flow->counter = 0;
14476         }
14477         if (flow->meter) {
14478                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
14479                 if (fm)
14480                         mlx5_flow_meter_detach(priv, fm);
14481                 flow->meter = 0;
14482         }
14483         /* Keep the current age handling by default. */
14484         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
14485                 flow_dv_aso_ct_release(dev, flow->ct, NULL);
14486         else if (flow->age)
14487                 flow_dv_aso_age_release(dev, flow->age);
14488         if (flow->geneve_tlv_option) {
14489                 flow_dv_geneve_tlv_option_resource_release(dev);
14490                 flow->geneve_tlv_option = 0;
14491         }
14492         while (flow->dev_handles) {
14493                 uint32_t tmp_idx = flow->dev_handles;
14494
14495                 dev_handle = mlx5_ipool_get(priv->sh->ipool
14496                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
14497                 if (!dev_handle)
14498                         return;
14499                 flow->dev_handles = dev_handle->next.next;
14500                 while (dev_handle->flex_item) {
14501                         int index = rte_bsf32(dev_handle->flex_item);
14502
14503                         mlx5_flex_release_index(dev, index);
14504                         dev_handle->flex_item &= ~RTE_BIT32(index);
14505                 }
14506                 if (dev_handle->dvh.matcher)
14507                         flow_dv_matcher_release(dev, dev_handle);
14508                 if (dev_handle->dvh.rix_sample)
14509                         flow_dv_sample_resource_release(dev, dev_handle);
14510                 if (dev_handle->dvh.rix_dest_array)
14511                         flow_dv_dest_array_resource_release(dev, dev_handle);
14512                 if (dev_handle->dvh.rix_encap_decap)
14513                         flow_dv_encap_decap_resource_release(dev,
14514                                 dev_handle->dvh.rix_encap_decap);
14515                 if (dev_handle->dvh.modify_hdr)
14516                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
14517                 if (dev_handle->dvh.rix_push_vlan)
14518                         flow_dv_push_vlan_action_resource_release(dev,
14519                                                                   dev_handle);
14520                 if (dev_handle->dvh.rix_tag)
14521                         flow_dv_tag_release(dev,
14522                                             dev_handle->dvh.rix_tag);
14523                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
14524                         flow_dv_fate_resource_release(dev, dev_handle);
14525                 else if (!srss)
14526                         srss = dev_handle->rix_srss;
14527                 if (fm && dev_handle->is_meter_flow_id &&
14528                     dev_handle->split_flow_id)
14529                         mlx5_ipool_free(fm->flow_ipool,
14530                                         dev_handle->split_flow_id);
14531                 else if (dev_handle->split_flow_id &&
14532                     !dev_handle->is_meter_flow_id)
14533                         mlx5_ipool_free(priv->sh->ipool
14534                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
14535                                         dev_handle->split_flow_id);
14536                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14537                            tmp_idx);
14538         }
14539         if (srss)
14540                 flow_dv_shared_rss_action_release(dev, srss);
14541 }
14542
14543 /**
14544  * Release an array of hash RX queue objects.
14545  * Helper function.
14546  *
14547  * @param[in] dev
14548  *   Pointer to the Ethernet device structure.
14549  * @param[in, out] hrxqs
14550  *   Array of hash RX queue objects.
14551  *
14552  * @return
14553  *   Total number of references to hash RX queue objects in *hrxqs* array
14554  *   after this operation.
14555  */
14556 static int
14557 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14558                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14559 {
14560         size_t i;
14561         int remaining = 0;
14562
14563         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14564                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14565
14566                 if (!ret)
14567                         (*hrxqs)[i] = 0;
14568                 remaining += ret;
14569         }
14570         return remaining;
14571 }
14572
14573 /**
14574  * Release all hash RX queue objects representing shared RSS action.
14575  *
14576  * @param[in] dev
14577  *   Pointer to the Ethernet device structure.
14578  * @param[in, out] action
14579  *   Shared RSS action to remove hash RX queue objects from.
14580  *
14581  * @return
14582  *   Total number of references to hash RX queue objects stored in *action*
14583  *   after this operation.
14584  *   Expected to be 0 if no external references held.
14585  */
14586 static int
14587 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
14588                                  struct mlx5_shared_action_rss *shared_rss)
14589 {
14590         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
14591 }
14592
14593 /**
14594  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
14595  * user input.
14596  *
14597  * Only one hash value is available for one L3+L4 combination.
14598  * For example:
14599  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
14600  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
14601  * the same slot in mlx5_rss_hash_fields.
14602  *
14603  * @param[in] rss
14604  *   Pointer to the shared action RSS conf.
14605  * @param[in, out] hash_field
14606  *   hash_field variable to be adjusted.
14607  *
14608  * @return
14609  *   void
14610  */
14611 static void
14612 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
14613                                      uint64_t *hash_field)
14614 {
14615         uint64_t rss_types = rss->origin.types;
14616
14617         switch (*hash_field & ~IBV_RX_HASH_INNER) {
14618         case MLX5_RSS_HASH_IPV4:
14619                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
14620                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
14621                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14622                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
14623                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14624                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
14625                         else
14626                                 *hash_field |= MLX5_RSS_HASH_IPV4;
14627                 }
14628                 return;
14629         case MLX5_RSS_HASH_IPV6:
14630                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
14631                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
14632                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14633                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
14634                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14635                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
14636                         else
14637                                 *hash_field |= MLX5_RSS_HASH_IPV6;
14638                 }
14639                 return;
14640         case MLX5_RSS_HASH_IPV4_UDP:
14641                 /* fall-through. */
14642         case MLX5_RSS_HASH_IPV6_UDP:
14643                 if (rss_types & RTE_ETH_RSS_UDP) {
14644                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
14645                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14646                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
14647                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14648                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
14649                         else
14650                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14651                 }
14652                 return;
14653         case MLX5_RSS_HASH_IPV4_TCP:
14654                 /* fall-through. */
14655         case MLX5_RSS_HASH_IPV6_TCP:
14656                 if (rss_types & RTE_ETH_RSS_TCP) {
14657                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14658                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14659                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14660                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14661                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14662                         else
14663                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14664                 }
14665                 return;
14666         default:
14667                 return;
14668         }
14669 }
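
/*
 * Worked example for the adjustment above, assuming a shared action
 * created with rss_types = RTE_ETH_RSS_IP | RTE_ETH_RSS_L3_SRC_ONLY:
 *
 *     uint64_t hf = MLX5_RSS_HASH_IPV4;
 *
 *     (MLX5_RSS_HASH_IPV4 is IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
 *     __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hf);
 *     (now hf == IBV_RX_HASH_SRC_IPV4)
 *
 * Slots whose L3/L4 combination is not enabled in rss_types keep their
 * default source+destination hash fields.
 */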
14670
14671 /**
14672  * Setup shared RSS action.
14673  * Prepare a set of hash RX queue objects sufficient to handle all valid
14674  * hash_fields combinations (see enum ibv_rx_hash_fields).
14675  *
14676  * @param[in] dev
14677  *   Pointer to the Ethernet device structure.
14678  * @param[in] action_idx
14679  *   Shared RSS action ipool index.
14680  * @param[in, out] action
14681  *   Partially initialized shared RSS action.
14682  * @param[out] error
14683  *   Perform verbose error reporting if not NULL. Initialized in case of
14684  *   error only.
14685  *
14686  * @return
14687  *   0 on success, otherwise negative errno value.
14688  */
14689 static int
14690 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14691                            uint32_t action_idx,
14692                            struct mlx5_shared_action_rss *shared_rss,
14693                            struct rte_flow_error *error)
14694 {
14695         struct mlx5_flow_rss_desc rss_desc = { 0 };
14696         size_t i;
14697         int err;
14698
14699         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl,
14700                                      !!dev->data->dev_started)) {
14701                 return rte_flow_error_set(error, rte_errno,
14702                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14703                                           "cannot setup indirection table");
14704         }
14705         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14706         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14707         rss_desc.const_q = shared_rss->origin.queue;
14708         rss_desc.queue_num = shared_rss->origin.queue_num;
14709         /* Set non-zero value to indicate a shared RSS. */
14710         rss_desc.shared_rss = action_idx;
14711         rss_desc.ind_tbl = shared_rss->ind_tbl;
14712         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14713                 uint32_t hrxq_idx;
14714                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14715                 int tunnel = 0;
14716
14717                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14718                 if (shared_rss->origin.level > 1) {
14719                         hash_fields |= IBV_RX_HASH_INNER;
14720                         tunnel = 1;
14721                 }
14722                 rss_desc.tunnel = tunnel;
14723                 rss_desc.hash_fields = hash_fields;
14724                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14725                 if (!hrxq_idx) {
14726                         rte_flow_error_set
14727                                 (error, rte_errno,
14728                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14729                                  "cannot get hash queue");
14730                         goto error_hrxq_new;
14731                 }
14732                 err = __flow_dv_action_rss_hrxq_set
14733                         (shared_rss, hash_fields, hrxq_idx);
14734                 MLX5_ASSERT(!err);
14735         }
14736         return 0;
14737 error_hrxq_new:
14738         err = rte_errno;
14739         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14740         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true))
14741                 shared_rss->ind_tbl = NULL;
14742         rte_errno = err;
14743         return -rte_errno;
14744 }
14745
14746 /**
14747  * Create shared RSS action.
14748  *
14749  * @param[in] dev
14750  *   Pointer to the Ethernet device structure.
14751  * @param[in] conf
14752  *   Shared action configuration.
14753  * @param[in] rss
14754  *   RSS action specification used to create shared action.
14755  * @param[out] error
14756  *   Perform verbose error reporting if not NULL. Initialized in case of
14757  *   error only.
14758  *
14759  * @return
14760  *   A valid shared action ID in case of success, 0 otherwise and
14761  *   rte_errno is set.
14762  */
14763 static uint32_t
14764 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14765                             const struct rte_flow_indir_action_conf *conf,
14766                             const struct rte_flow_action_rss *rss,
14767                             struct rte_flow_error *error)
14768 {
14769         struct mlx5_priv *priv = dev->data->dev_private;
14770         struct mlx5_shared_action_rss *shared_rss = NULL;
14771         void *queue = NULL;
14772         struct rte_flow_action_rss *origin;
14773         const uint8_t *rss_key;
14774         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14775         uint32_t idx;
14776
14777         RTE_SET_USED(conf);
14778         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14779                             0, SOCKET_ID_ANY);
14780         shared_rss = mlx5_ipool_zmalloc
14781                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14782         if (!shared_rss || !queue) {
14783                 rte_flow_error_set(error, ENOMEM,
14784                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14785                                    "cannot allocate resource memory");
14786                 goto error_rss_init;
14787         }
14788         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14789                 rte_flow_error_set(error, E2BIG,
14790                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14791                                    "rss action number out of range");
14792                 goto error_rss_init;
14793         }
14794         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14795                                           sizeof(*shared_rss->ind_tbl),
14796                                           0, SOCKET_ID_ANY);
14797         if (!shared_rss->ind_tbl) {
14798                 rte_flow_error_set(error, ENOMEM,
14799                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14800                                    "cannot allocate resource memory");
14801                 goto error_rss_init;
14802         }
14803         memcpy(queue, rss->queue, queue_size);
14804         shared_rss->ind_tbl->queues = queue;
14805         shared_rss->ind_tbl->queues_n = rss->queue_num;
14806         origin = &shared_rss->origin;
14807         origin->func = rss->func;
14808         origin->level = rss->level;
14809         /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
14810         origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
14811         /* NULL RSS key indicates default RSS key. */
14812         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14813         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14814         origin->key = &shared_rss->key[0];
14815         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14816         origin->queue = queue;
14817         origin->queue_num = rss->queue_num;
14818         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14819                 goto error_rss_init;
14820         rte_spinlock_init(&shared_rss->action_rss_sl);
14821         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14822         rte_spinlock_lock(&priv->shared_act_sl);
14823         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14824                      &priv->rss_shared_actions, idx, shared_rss, next);
14825         rte_spinlock_unlock(&priv->shared_act_sl);
14826         return idx;
14827 error_rss_init:
14828         if (shared_rss) {
14829                 if (shared_rss->ind_tbl)
14830                         mlx5_free(shared_rss->ind_tbl);
14831                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14832                                 idx);
14833         }
14834         if (queue)
14835                 mlx5_free(queue);
14836         return 0;
14837 }
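
/*
 * Application-level sketch: this function is reached through the public
 * indirect action API, roughly as follows (error handling elided; queues
 * and nb_queues are application-provided):
 *
 *     struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *     struct rte_flow_action_rss rss = {
 *             .types = RTE_ETH_RSS_IP,
 *             .queue = queues,
 *             .queue_num = nb_queues,
 *     };
 *     struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_RSS,
 *             .conf = &rss,
 *     };
 *     struct rte_flow_action_handle *handle =
 *             rte_flow_action_handle_create(port_id, &conf, &action, NULL);
 *
 * The ipool index returned here is embedded into the opaque handle by
 * flow_dv_action_create() below.
 */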
14838
14839 /**
14840  * Destroy the shared RSS action.
14841  * Release related hash RX queue objects.
14842  *
14843  * @param[in] dev
14844  *   Pointer to the Ethernet device structure.
14845  * @param[in] idx
14846  *   The shared RSS action object ID to be removed.
14847  * @param[out] error
14848  *   Perform verbose error reporting if not NULL. Initialized in case of
14849  *   error only.
14850  *
14851  * @return
14852  *   0 on success, otherwise negative errno value.
14853  */
14854 static int
14855 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14856                              struct rte_flow_error *error)
14857 {
14858         struct mlx5_priv *priv = dev->data->dev_private;
14859         struct mlx5_shared_action_rss *shared_rss =
14860             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14861         uint32_t old_refcnt = 1;
14862         int remaining;
14863         uint16_t *queue = NULL;
14864
14865         if (!shared_rss)
14866                 return rte_flow_error_set(error, EINVAL,
14867                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14868                                           "invalid shared action");
14869         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14870                                          0, 0, __ATOMIC_ACQUIRE,
14871                                          __ATOMIC_RELAXED))
14872                 return rte_flow_error_set(error, EBUSY,
14873                                           RTE_FLOW_ERROR_TYPE_ACTION,
14874                                           NULL,
14875                                           "shared rss has references");
14876         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14877         if (remaining)
14878                 return rte_flow_error_set(error, EBUSY,
14879                                           RTE_FLOW_ERROR_TYPE_ACTION,
14880                                           NULL,
14881                                           "shared rss hrxq has references");
14882         queue = shared_rss->ind_tbl->queues;
14883         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true,
14884                                                !!dev->data->dev_started);
14885         if (remaining)
14886                 return rte_flow_error_set(error, EBUSY,
14887                                           RTE_FLOW_ERROR_TYPE_ACTION,
14888                                           NULL,
14889                                           "shared rss indirection table has"
14890                                           " references");
14891         mlx5_free(queue);
14892         rte_spinlock_lock(&priv->shared_act_sl);
14893         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14894                      &priv->rss_shared_actions, idx, shared_rss, next);
14895         rte_spinlock_unlock(&priv->shared_act_sl);
14896         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14897                         idx);
14898         return 0;
14899 }
14900
14901 /**
14902  * Create an indirect action. Lock free
14903  * (the mutex should be acquired by the caller).
14904  * Dispatcher for the action-type-specific call.
14905  *
14906  * @param[in] dev
14907  *   Pointer to the Ethernet device structure.
14908  * @param[in] conf
14909  *   Shared action configuration.
14910  * @param[in] action
14911  *   Action specification used to create indirect action.
14912  * @param[out] error
14913  *   Perform verbose error reporting if not NULL. Initialized in case of
14914  *   error only.
14915  *
14916  * @return
14917  *   A valid shared action handle in case of success, NULL otherwise and
14918  *   rte_errno is set.
14919  */
14920 static struct rte_flow_action_handle *
14921 flow_dv_action_create(struct rte_eth_dev *dev,
14922                       const struct rte_flow_indir_action_conf *conf,
14923                       const struct rte_flow_action *action,
14924                       struct rte_flow_error *err)
14925 {
14926         struct mlx5_priv *priv = dev->data->dev_private;
14927         uint32_t age_idx = 0;
14928         uint32_t idx = 0;
14929         uint32_t ret = 0;
14930
14931         switch (action->type) {
14932         case RTE_FLOW_ACTION_TYPE_RSS:
14933                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14934                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14935                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14936                 break;
14937         case RTE_FLOW_ACTION_TYPE_AGE:
14938                 age_idx = flow_dv_aso_age_alloc(dev, err);
14939                 if (!age_idx) {
14940                         ret = -rte_errno;
14941                         break;
14942                 }
14943                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14944                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14945                 flow_dv_aso_age_params_init(dev, age_idx,
14946                                         ((const struct rte_flow_action_age *)
14947                                                 action->conf)->context ?
14948                                         ((const struct rte_flow_action_age *)
14949                                                 action->conf)->context :
14950                                         (void *)(uintptr_t)idx,
14951                                         ((const struct rte_flow_action_age *)
14952                                                 action->conf)->timeout);
14953                 ret = age_idx;
14954                 break;
14955         case RTE_FLOW_ACTION_TYPE_COUNT:
14956                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14957                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14958                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14959                 break;
14960         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14961                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14962                                                          err);
14963                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14964                 break;
14965         default:
14966                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14967                                    NULL, "action type not supported");
14968                 break;
14969         }
14970         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14971 }
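
/*
 * Handle encoding used above, and decoded again by flow_dv_action_destroy():
 * the action type occupies the bits above MLX5_INDIRECT_ACTION_TYPE_OFFSET
 * and the ipool index the bits below it (CT handles additionally embed the
 * owner port via MLX5_INDIRECT_ACT_CT_GEN_IDX()):
 *
 *     uint32_t act_idx = (uint32_t)(uintptr_t)handle;
 *     uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 *     uint32_t idx = act_idx &
 *                    ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
 *
 * This split is why __flow_dv_action_rss_create() rejects ipool indexes
 * that do not fit below the type offset.
 */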
14972
14973 /**
14974  * Destroy the indirect action.
14975  * Release action-related resources on the NIC and in memory.
14976  * Lock free (the mutex should be acquired by the caller).
14977  * Dispatcher for the action-type-specific call.
14978  *
14979  * @param[in] dev
14980  *   Pointer to the Ethernet device structure.
14981  * @param[in] handle
14982  *   The indirect action object handle to be removed.
14983  * @param[out] error
14984  *   Perform verbose error reporting if not NULL. Initialized in case of
14985  *   error only.
14986  *
14987  * @return
14988  *   0 on success, otherwise negative errno value.
14989  */
14990 static int
14991 flow_dv_action_destroy(struct rte_eth_dev *dev,
14992                        struct rte_flow_action_handle *handle,
14993                        struct rte_flow_error *error)
14994 {
14995         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14996         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14997         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14998         struct mlx5_flow_counter *cnt;
14999         uint32_t no_flow_refcnt = 1;
15000         int ret;
15001
15002         switch (type) {
15003         case MLX5_INDIRECT_ACTION_TYPE_RSS:
15004                 return __flow_dv_action_rss_release(dev, idx, error);
15005         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15006                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
15007                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
15008                                                  &no_flow_refcnt, 1, false,
15009                                                  __ATOMIC_ACQUIRE,
15010                                                  __ATOMIC_RELAXED))
15011                         return rte_flow_error_set(error, EBUSY,
15012                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15013                                                   NULL,
15014                                                   "Indirect count action has references");
15015                 flow_dv_counter_free(dev, idx);
15016                 return 0;
15017         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15018                 ret = flow_dv_aso_age_release(dev, idx);
15019                 if (ret)
15020                         /*
15021                          * In this case, the last flow holding a reference
15022                          * will actually release the age action.
15023                          */
15024                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
15025                                 " released with references %d.", idx, ret);
15026                 return 0;
15027         case MLX5_INDIRECT_ACTION_TYPE_CT:
15028                 ret = flow_dv_aso_ct_release(dev, idx, error);
15029                 if (ret < 0)
15030                         return ret;
15031                 if (ret > 0)
15032                         DRV_LOG(DEBUG, "Connection tracking object %u still "
15033                                 "has references %d.", idx, ret);
15034                 return 0;
15035         default:
15036                 return rte_flow_error_set(error, ENOTSUP,
15037                                           RTE_FLOW_ERROR_TYPE_ACTION,
15038                                           NULL,
15039                                           "action type not supported");
15040         }
15041 }
15042
15043 /**
15044  * Updates the shared RSS action configuration in place.
15045  *
15046  * @param[in] dev
15047  *   Pointer to the Ethernet device structure.
15048  * @param[in] idx
15049  *   The shared RSS action object ID to be updated.
15050  * @param[in] action_conf
15051  *   RSS action specification used to modify *shared_rss*.
15052  * @param[out] error
15053  *   Perform verbose error reporting if not NULL. Initialized in case of
15054  *   error only.
15055  *
15056  * @return
15057  *   0 on success, otherwise negative errno value.
15058  * @note: currently only updating of the RSS queue set is supported.
15059  */
15060 static int
15061 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
15062                             const struct rte_flow_action_rss *action_conf,
15063                             struct rte_flow_error *error)
15064 {
15065         struct mlx5_priv *priv = dev->data->dev_private;
15066         struct mlx5_shared_action_rss *shared_rss =
15067             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
15068         int ret = 0;
15069         void *queue = NULL;
15070         uint16_t *queue_old = NULL;
15071         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
15072         bool dev_started = !!dev->data->dev_started;
15073
15074         if (!shared_rss)
15075                 return rte_flow_error_set(error, EINVAL,
15076                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15077                                           "invalid shared action to update");
15078         if (priv->obj_ops.ind_table_modify == NULL)
15079                 return rte_flow_error_set(error, ENOTSUP,
15080                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15081                                           "cannot modify indirection table");
15082         queue = mlx5_malloc(MLX5_MEM_ZERO,
15083                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
15084                             0, SOCKET_ID_ANY);
15085         if (!queue)
15086                 return rte_flow_error_set(error, ENOMEM,
15087                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15088                                           NULL,
15089                                           "cannot allocate resource memory");
15090         memcpy(queue, action_conf->queue, queue_size);
15091         MLX5_ASSERT(shared_rss->ind_tbl);
15092         rte_spinlock_lock(&shared_rss->action_rss_sl);
15093         queue_old = shared_rss->ind_tbl->queues;
15094         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
15095                                         queue, action_conf->queue_num,
15096                                         true /* standalone */,
15097                                         dev_started /* ref_new_qs */,
15098                                         dev_started /* deref_old_qs */);
15099         if (ret) {
15100                 mlx5_free(queue);
15101                 ret = rte_flow_error_set(error, rte_errno,
15102                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15103                                           "cannot update indirection table");
15104         } else {
15105                 mlx5_free(queue_old);
15106                 shared_rss->origin.queue = queue;
15107                 shared_rss->origin.queue_num = action_conf->queue_num;
15108         }
15109         rte_spinlock_unlock(&shared_rss->action_rss_sl);
15110         return ret;
15111 }
15112
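/*
 * Illustrative sketch (not part of the driver): how an application reaches
 * __flow_dv_action_rss_update() above through the generic rte_flow API.
 * The helper name and the queue values are assumptions for the example.
 */
static int
example_update_shared_rss_queues(uint16_t port_id,
                                 struct rte_flow_action_handle *handle)
{
        /* New Rx queue set for the shared RSS action (hypothetical values). */
        static const uint16_t queues[] = { 0, 1, 2, 3 };
        struct rte_flow_action_rss rss_conf = {
                .queue = queues,
                .queue_num = RTE_DIM(queues),
        };
        struct rte_flow_action update = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &rss_conf,
        };
        struct rte_flow_error error;

        /* Dispatched via flow_dv_action_update() for RSS-type handles. */
        return rte_flow_action_handle_update(port_id, handle, &update, &error);
}
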
15113 /**
15114  * Updates the conntrack context or direction in place.
15115  * The context update should be synchronized by the caller.
15116  *
15117  * @param[in] dev
15118  *   Pointer to the Ethernet device structure.
15119  * @param[in] idx
15120  *   The conntrack object ID to be updated.
15121  * @param[in] update
15122  *   Pointer to the structure of information to update.
15123  * @param[out] error
15124  *   Perform verbose error reporting if not NULL. Initialized in case of
15125  *   error only.
15126  *
15127  * @return
15128  *   0 on success, otherwise negative errno value.
15129  */
15130 static int
15131 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
15132                            const struct rte_flow_modify_conntrack *update,
15133                            struct rte_flow_error *error)
15134 {
15135         struct mlx5_priv *priv = dev->data->dev_private;
15136         struct mlx5_aso_ct_action *ct;
15137         const struct rte_flow_action_conntrack *new_prf;
15138         int ret = 0;
15139         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15140         uint32_t dev_idx;
15141
15142         if (PORT_ID(priv) != owner)
15143                 return rte_flow_error_set(error, EACCES,
15144                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15145                                           NULL,
15146                                           "CT object owned by another port");
15147         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15148         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15149         if (!ct->refcnt)
15150                 return rte_flow_error_set(error, ENOMEM,
15151                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15152                                           NULL,
15153                                           "CT object is inactive");
15154         new_prf = &update->new_ct;
15155         if (update->direction)
15156                 ct->is_original = !!new_prf->is_original_dir;
15157         if (update->state) {
15158                 /* Only validate the profile when it needs to be updated. */
15159                 ret = mlx5_validate_action_ct(dev, new_prf, error);
15160                 if (ret)
15161                         return ret;
15162                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
15163                 if (ret)
15164                         return rte_flow_error_set(error, EIO,
15165                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15166                                         NULL,
15167                                         "Failed to send CT context update WQE");
15168                 /* Block until ready or a failure. */
15169                 ret = mlx5_aso_ct_available(priv->sh, ct);
15170                 if (ret)
15171                         rte_flow_error_set(error, rte_errno,
15172                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15173                                            NULL,
15174                                            "Timeout to get the CT update");
15175         }
15176         return ret;
15177 }
15178
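/*
 * Illustrative sketch (not part of the driver): updating only the direction
 * bit of a conntrack object through rte_flow_action_handle_update(), which
 * lands in __flow_dv_action_ct_update() above. The helper name and values
 * are assumptions for the example.
 */
static int
example_update_ct_direction(uint16_t port_id,
                            struct rte_flow_action_handle *handle)
{
        struct rte_flow_modify_conntrack update = {
                .new_ct = { .is_original_dir = 1 },
                .direction = 1, /* Update the direction bit only... */
                .state = 0,     /* ...and keep the CT context untouched. */
        };
        struct rte_flow_error error;

        return rte_flow_action_handle_update(port_id, handle, &update, &error);
}
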
15179 /**
15180  * Updates the shared action configuration in place, lock free
15181  * (mutex should be acquired by caller).
15182  *
15183  * @param[in] dev
15184  *   Pointer to the Ethernet device structure.
15185  * @param[in] handle
15186  *   The indirect action object handle to be updated.
15187  * @param[in] update
15188  *   Action specification used to modify the action pointed to by *handle*.
15189  *   *update* can be of the same type as the action pointed to by *handle*,
15190  *   or some other structure such as a wrapper, depending on the
15191  *   indirect action type.
15192  * @param[out] error
15193  *   Perform verbose error reporting if not NULL. Initialized in case of
15194  *   error only.
15195  *
15196  * @return
15197  *   0 on success, otherwise negative errno value.
15198  */
15199 static int
15200 flow_dv_action_update(struct rte_eth_dev *dev,
15201                         struct rte_flow_action_handle *handle,
15202                         const void *update,
15203                         struct rte_flow_error *err)
15204 {
15205         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15206         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15207         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15208         const void *action_conf;
15209
15210         switch (type) {
15211         case MLX5_INDIRECT_ACTION_TYPE_RSS:
15212                 action_conf = ((const struct rte_flow_action *)update)->conf;
15213                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
15214         case MLX5_INDIRECT_ACTION_TYPE_CT:
15215                 return __flow_dv_action_ct_update(dev, idx, update, err);
15216         default:
15217                 return rte_flow_error_set(err, ENOTSUP,
15218                                           RTE_FLOW_ERROR_TYPE_ACTION,
15219                                           NULL,
15220                                           "action type update not supported");
15221         }
15222 }
15223
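/*
 * The indirect action handle is a plain integer that packs the action type
 * in the bits above MLX5_INDIRECT_ACTION_TYPE_OFFSET and the pool index in
 * the bits below it, as decoded in flow_dv_action_update() above. A
 * hypothetical helper making that layout explicit:
 */
static inline void
example_decode_action_handle(const struct rte_flow_action_handle *handle,
                             uint32_t *type, uint32_t *idx)
{
        uint32_t act_idx = (uint32_t)(uintptr_t)handle;

        *type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
        *idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
}
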
15224 /**
15225  * Destroy the meter sub policy table rules.
15226  * Lock free, (mutex should be acquired by caller).
15227  *
15228  * @param[in] dev
15229  *   Pointer to Ethernet device.
15230  * @param[in] sub_policy
15231  *   Pointer to meter sub policy table.
15232  */
15233 static void
15234 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
15235                              struct mlx5_flow_meter_sub_policy *sub_policy)
15236 {
15237         struct mlx5_priv *priv = dev->data->dev_private;
15238         struct mlx5_flow_tbl_data_entry *tbl;
15239         struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
15240         struct mlx5_flow_meter_info *next_fm;
15241         struct mlx5_sub_policy_color_rule *color_rule;
15242         void *tmp;
15243         uint32_t i;
15244
15245         for (i = 0; i < RTE_COLORS; i++) {
15246                 next_fm = NULL;
15247                 if (i == RTE_COLOR_GREEN && policy &&
15248                     policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
15249                         next_fm = mlx5_flow_meter_find(priv,
15250                                         policy->act_cnt[i].next_mtr_id, NULL);
15251                 RTE_TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
15252                                    next_port, tmp) {
15253                         claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
15254                         tbl = container_of(color_rule->matcher->tbl,
15255                                            typeof(*tbl), tbl);
15256                         mlx5_list_unregister(tbl->matchers,
15257                                              &color_rule->matcher->entry);
15258                         TAILQ_REMOVE(&sub_policy->color_rules[i],
15259                                      color_rule, next_port);
15260                         mlx5_free(color_rule);
15261                         if (next_fm)
15262                                 mlx5_flow_meter_detach(priv, next_fm);
15263                 }
15264         }
15265         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15266                 if (sub_policy->rix_hrxq[i]) {
15267                         if (policy && !policy->is_hierarchy)
15268                                 mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
15269                         sub_policy->rix_hrxq[i] = 0;
15270                 }
15271                 if (sub_policy->jump_tbl[i]) {
15272                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15273                                                      sub_policy->jump_tbl[i]);
15274                         sub_policy->jump_tbl[i] = NULL;
15275                 }
15276         }
15277         if (sub_policy->tbl_rsc) {
15278                 flow_dv_tbl_resource_release(MLX5_SH(dev),
15279                                              sub_policy->tbl_rsc);
15280                 sub_policy->tbl_rsc = NULL;
15281         }
15282 }
15283
15284 /**
15285  * Destroy policy rules, lock free,
15286  * (mutex should be acquired by caller).
15287  * Iterates over the sub-policies of every meter domain.
15288  *
15289  * @param[in] dev
15290  *   Pointer to the Ethernet device structure.
15291  * @param[in] mtr_policy
15292  *   Meter policy struct.
15293  */
15294 static void
15295 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
15296                              struct mlx5_flow_meter_policy *mtr_policy)
15297 {
15298         uint32_t i, j;
15299         struct mlx5_flow_meter_sub_policy *sub_policy;
15300         uint16_t sub_policy_num;
15301
15302         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15303                 sub_policy_num = (mtr_policy->sub_policy_num >>
15304                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15305                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15306                 for (j = 0; j < sub_policy_num; j++) {
15307                         sub_policy = mtr_policy->sub_policys[i][j];
15308                         if (sub_policy)
15309                                 __flow_dv_destroy_sub_policy_rules(dev,
15310                                                                    sub_policy);
15311                 }
15312         }
15313 }
15314
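/*
 * The per-domain sub-policy counts are packed into the single
 * mtr_policy->sub_policy_num word, one MLX5_MTR_SUB_POLICY_NUM_SHIFT-bit
 * field per domain, as unpacked in flow_dv_destroy_policy_rules() above.
 * A hypothetical helper showing the extraction:
 */
static inline uint16_t
example_get_sub_policy_num(const struct mlx5_flow_meter_policy *mtr_policy,
                           enum mlx5_meter_domain domain)
{
        return (mtr_policy->sub_policy_num >>
                (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
               MLX5_MTR_SUB_POLICY_NUM_MASK;
}
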
15315 /**
15316  * Destroy policy action, lock free,
15317  * (mutex should be acquired by caller).
15318  * Dispatcher for action type specific call.
15319  *
15320  * @param[in] dev
15321  *   Pointer to the Ethernet device structure.
15322  * @param[in] mtr_policy
15323  *   Meter policy struct.
15324  */
15325 static void
15326 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
15327                       struct mlx5_flow_meter_policy *mtr_policy)
15328 {
15329         struct rte_flow_action *rss_action;
15330         struct mlx5_flow_handle dev_handle;
15331         uint32_t i, j;
15332
15333         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15334                 if (mtr_policy->act_cnt[i].rix_mark) {
15335                         flow_dv_tag_release(dev,
15336                                 mtr_policy->act_cnt[i].rix_mark);
15337                         mtr_policy->act_cnt[i].rix_mark = 0;
15338                 }
15339                 if (mtr_policy->act_cnt[i].modify_hdr) {
15340                         dev_handle.dvh.modify_hdr =
15341                                 mtr_policy->act_cnt[i].modify_hdr;
15342                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
15343                 }
15344                 switch (mtr_policy->act_cnt[i].fate_action) {
15345                 case MLX5_FLOW_FATE_SHARED_RSS:
15346                         rss_action = mtr_policy->act_cnt[i].rss;
15347                         mlx5_free(rss_action);
15348                         break;
15349                 case MLX5_FLOW_FATE_PORT_ID:
15350                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
15351                                 flow_dv_port_id_action_resource_release(dev,
15352                                 mtr_policy->act_cnt[i].rix_port_id_action);
15353                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
15354                         }
15355                         break;
15356                 case MLX5_FLOW_FATE_DROP:
15357                 case MLX5_FLOW_FATE_JUMP:
15358                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15359                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
15360                                                 NULL;
15361                         break;
15362                 default:
15363                         /* Queue action: nothing to release. */
15364                         break;
15365                 }
15366         }
15367         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15368                 mtr_policy->dr_drop_action[j] = NULL;
15369 }
15370
15371 /**
15372  * Create policy action per domain, lock free,
15373  * (mutex should be acquired by caller).
15374  * Dispatcher for action type specific call.
15375  *
15376  * @param[in] dev
15377  *   Pointer to the Ethernet device structure.
15378  * @param[in] mtr_policy
15379  *   Meter policy struct.
15380  * @param[in] actions
15381  *   Per-color action specifications used to create the meter actions.
15382  * @param[out] error
15383  *   Perform verbose error reporting if not NULL. Initialized in case of
15384  *   error only.
15385  *
15386  * @return
15387  *   0 on success, otherwise negative errno value.
15388  */
15389 static int
15390 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15391                         struct mlx5_flow_meter_policy *mtr_policy,
15392                         const struct rte_flow_action *actions[RTE_COLORS],
15393                         enum mlx5_meter_domain domain,
15394                         struct rte_mtr_error *error)
15395 {
15396         struct mlx5_priv *priv = dev->data->dev_private;
15397         struct rte_flow_error flow_err;
15398         const struct rte_flow_action *act;
15399         uint64_t action_flags;
15400         struct mlx5_flow_handle dh;
15401         struct mlx5_flow dev_flow;
15402         struct mlx5_flow_dv_port_id_action_resource port_id_action;
15403         int i, ret;
15404         uint8_t egress, transfer;
15405         struct mlx5_meter_policy_action_container *act_cnt = NULL;
15406         union {
15407                 struct mlx5_flow_dv_modify_hdr_resource res;
15408                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15409                             sizeof(struct mlx5_modification_cmd) *
15410                             (MLX5_MAX_MODIFY_NUM + 1)];
15411         } mhdr_dummy;
15412         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15413         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
15414
15415         MLX5_ASSERT(wks);
15416         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15417         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15418         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15419         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15420         memset(&port_id_action, 0,
15421                sizeof(struct mlx5_flow_dv_port_id_action_resource));
15422         memset(mhdr_res, 0, sizeof(*mhdr_res));
15423         mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15424                                        (egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15425                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX);
15426         dev_flow.handle = &dh;
15427         dev_flow.dv.port_id_action = &port_id_action;
15428         dev_flow.external = true;
15429         for (i = 0; i < RTE_COLORS; i++) {
15430                 if (i < MLX5_MTR_RTE_COLORS)
15431                         act_cnt = &mtr_policy->act_cnt[i];
15432                 /* Skip the policy actions creation for skipped colors. */
15433                 if ((i == RTE_COLOR_YELLOW && mtr_policy->skip_y) ||
15434                     (i == RTE_COLOR_GREEN && mtr_policy->skip_g))
15435                         continue;
15436                 action_flags = 0;
15437                 for (act = actions[i];
15438                      act && act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
15439                         switch (act->type) {
15440                         case RTE_FLOW_ACTION_TYPE_MARK:
15441                         {
15442                                 uint32_t tag_be = mlx5_flow_mark_set
15443                                         (((const struct rte_flow_action_mark *)
15444                                         (act->conf))->id);
15445
15446                                 if (i >= MLX5_MTR_RTE_COLORS)
15447                                         return -rte_mtr_error_set(error,
15448                                           ENOTSUP,
15449                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15450                                           NULL,
15451                                           "cannot create policy "
15452                                           "mark action for this color");
15453                                 wks->mark = 1;
15454                                 if (flow_dv_tag_resource_register(dev, tag_be,
15455                                                   &dev_flow, &flow_err))
15456                                         return -rte_mtr_error_set(error,
15457                                         ENOTSUP,
15458                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15459                                         NULL,
15460                                         "cannot setup policy mark action");
15461                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
15462                                 act_cnt->rix_mark =
15463                                         dev_flow.handle->dvh.rix_tag;
15464                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15465                                 break;
15466                         }
15467                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15468                                 if (i >= MLX5_MTR_RTE_COLORS)
15469                                         return -rte_mtr_error_set(error,
15470                                           ENOTSUP,
15471                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15472                                           NULL,
15473                                           "cannot create policy "
15474                                           "set tag action for this color");
15475                                 if (flow_dv_convert_action_set_tag
15476                                 (dev, mhdr_res,
15477                                 (const struct rte_flow_action_set_tag *)
15478                                 act->conf,  &flow_err))
15479                                         return -rte_mtr_error_set(error,
15480                                         ENOTSUP,
15481                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15482                                         NULL, "cannot convert policy "
15483                                         "set tag action");
15484                                 if (!mhdr_res->actions_num)
15485                                         return -rte_mtr_error_set(error,
15486                                         ENOTSUP,
15487                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15488                                         NULL, "cannot find policy "
15489                                         "set tag action");
15490                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15491                                 break;
15492                         case RTE_FLOW_ACTION_TYPE_DROP:
15493                         {
15494                                 struct mlx5_flow_mtr_mng *mtrmng =
15495                                                 priv->sh->mtrmng;
15496                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15497
15498                                 /*
15499                                  * Create the drop table with
15500                                  * METER DROP level.
15501                                  */
15502                                 if (!mtrmng->drop_tbl[domain]) {
15503                                         mtrmng->drop_tbl[domain] =
15504                                         flow_dv_tbl_resource_get(dev,
15505                                         MLX5_FLOW_TABLE_LEVEL_METER,
15506                                         egress, transfer, false, NULL, 0,
15507                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15508                                         if (!mtrmng->drop_tbl[domain])
15509                                                 return -rte_mtr_error_set
15510                                         (error, ENOTSUP,
15511                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15512                                         NULL,
15513                                         "Failed to create meter drop table");
15514                                 }
15515                                 tbl_data = container_of
15516                                 (mtrmng->drop_tbl[domain],
15517                                 struct mlx5_flow_tbl_data_entry, tbl);
15518                                 if (i < MLX5_MTR_RTE_COLORS) {
15519                                         act_cnt->dr_jump_action[domain] =
15520                                                 tbl_data->jump.action;
15521                                         act_cnt->fate_action =
15522                                                 MLX5_FLOW_FATE_DROP;
15523                                 }
15524                                 if (i == RTE_COLOR_RED)
15525                                         mtr_policy->dr_drop_action[domain] =
15526                                                 tbl_data->jump.action;
15527                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15528                                 break;
15529                         }
15530                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15531                         {
15532                                 if (i >= MLX5_MTR_RTE_COLORS)
15533                                         return -rte_mtr_error_set(error,
15534                                         ENOTSUP,
15535                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15536                                         NULL, "cannot create policy "
15537                                         "fate queue for this color");
15538                                 act_cnt->queue =
15539                                 ((const struct rte_flow_action_queue *)
15540                                         (act->conf))->index;
15541                                 act_cnt->fate_action =
15542                                         MLX5_FLOW_FATE_QUEUE;
15543                                 dev_flow.handle->fate_action =
15544                                         MLX5_FLOW_FATE_QUEUE;
15545                                 mtr_policy->is_queue = 1;
15546                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15547                                 break;
15548                         }
15549                         case RTE_FLOW_ACTION_TYPE_RSS:
15550                         {
15551                                 int rss_size;
15552
15553                                 if (i >= MLX5_MTR_RTE_COLORS)
15554                                         return -rte_mtr_error_set(error,
15555                                           ENOTSUP,
15556                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15557                                           NULL,
15558                                           "cannot create policy "
15559                                           "rss action for this color");
15560                                 /*
15561                                  * Save RSS conf into policy struct
15562                                  * for translate stage.
15563                                  */
15564                                 rss_size = (int)rte_flow_conv
15565                                         (RTE_FLOW_CONV_OP_ACTION,
15566                                         NULL, 0, act, &flow_err);
15567                                 if (rss_size <= 0)
15568                                         return -rte_mtr_error_set(error,
15569                                           ENOTSUP,
15570                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15571                                           NULL, "Get the wrong "
15572                                           "rss action struct size");
15573                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
15574                                                 rss_size, 0, SOCKET_ID_ANY);
15575                                 if (!act_cnt->rss)
15576                                         return -rte_mtr_error_set(error,
15577                                           ENOTSUP,
15578                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15579                                           NULL,
15580                                           "Fail to malloc rss action memory");
15581                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
15582                                         act_cnt->rss, rss_size,
15583                                         act, &flow_err);
15584                                 if (ret < 0)
15585                                         return -rte_mtr_error_set(error,
15586                                           ENOTSUP,
15587                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15588                                           NULL, "Fail to save "
15589                                           "rss action into policy struct");
15590                                 act_cnt->fate_action =
15591                                         MLX5_FLOW_FATE_SHARED_RSS;
15592                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15593                                 break;
15594                         }
15595                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15596                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
15597                         {
15598                                 struct mlx5_flow_dv_port_id_action_resource
15599                                         port_id_resource;
15600                                 uint32_t port_id = 0;
15601
15602                                 if (i >= MLX5_MTR_RTE_COLORS)
15603                                         return -rte_mtr_error_set(error,
15604                                         ENOTSUP,
15605                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15606                                         NULL, "cannot create policy "
15607                                         "port action for this color");
15608                                 memset(&port_id_resource, 0,
15609                                         sizeof(port_id_resource));
15610                                 if (flow_dv_translate_action_port_id(dev, act,
15611                                                 &port_id, &flow_err))
15612                                         return -rte_mtr_error_set(error,
15613                                         ENOTSUP,
15614                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15615                                         NULL, "cannot translate "
15616                                         "policy port action");
15617                                 port_id_resource.port_id = port_id;
15618                                 if (flow_dv_port_id_action_resource_register
15619                                         (dev, &port_id_resource,
15620                                         &dev_flow, &flow_err))
15621                                         return -rte_mtr_error_set(error,
15622                                         ENOTSUP,
15623                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15624                                         NULL, "cannot setup "
15625                                         "policy port action");
15626                                 act_cnt->rix_port_id_action =
15627                                         dev_flow.handle->rix_port_id_action;
15628                                 act_cnt->fate_action =
15629                                         MLX5_FLOW_FATE_PORT_ID;
15630                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15631                                 break;
15632                         }
15633                         case RTE_FLOW_ACTION_TYPE_JUMP:
15634                         {
15635                                 uint32_t jump_group = 0;
15636                                 uint32_t table = 0;
15637                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15638                                 struct flow_grp_info grp_info = {
15639                                         .external = !!dev_flow.external,
15640                                         .transfer = !!transfer,
15641                                         .fdb_def_rule = !!priv->fdb_def_rule,
15642                                         .std_tbl_fix = 0,
15643                                         .skip_scale = dev_flow.skip_scale &
15644                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
15645                                 };
15646                                 struct mlx5_flow_meter_sub_policy *sub_policy =
15647                                         mtr_policy->sub_policys[domain][0];
15648
15649                                 if (i >= MLX5_MTR_RTE_COLORS)
15650                                         return -rte_mtr_error_set(error,
15651                                           ENOTSUP,
15652                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15653                                           NULL,
15654                                           "cannot create policy "
15655                                           "jump action for this color");
15656                                 jump_group =
15657                                 ((const struct rte_flow_action_jump *)
15658                                                         act->conf)->group;
15659                                 if (mlx5_flow_group_to_table(dev, NULL,
15660                                                        jump_group,
15661                                                        &table,
15662                                                        &grp_info, &flow_err))
15663                                         return -rte_mtr_error_set(error,
15664                                         ENOTSUP,
15665                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15666                                         NULL, "cannot setup "
15667                                         "policy jump action");
15668                                 sub_policy->jump_tbl[i] =
15669                                 flow_dv_tbl_resource_get(dev,
15670                                         table, egress,
15671                                         transfer,
15672                                         !!dev_flow.external,
15673                                         NULL, jump_group, 0,
15674                                         0, &flow_err);
15675                                 if (!sub_policy->jump_tbl[i])
15676                                         return -rte_mtr_error_set(error,
15678                                         ENOTSUP,
15679                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15680                                         NULL, "cannot create jump action.");
15681                                 tbl_data = container_of
15682                                 (sub_policy->jump_tbl[i],
15683                                 struct mlx5_flow_tbl_data_entry, tbl);
15684                                 act_cnt->dr_jump_action[domain] =
15685                                         tbl_data->jump.action;
15686                                 act_cnt->fate_action =
15687                                         MLX5_FLOW_FATE_JUMP;
15688                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15689                                 break;
15690                         }
15691                         /*
15692                          * No need to check meter hierarchy for Y or R colors
15693                          * here since it is done in the validation stage.
15694                          */
15695                         case RTE_FLOW_ACTION_TYPE_METER:
15696                         {
15697                                 const struct rte_flow_action_meter *mtr;
15698                                 struct mlx5_flow_meter_info *next_fm;
15699                                 struct mlx5_flow_meter_policy *next_policy;
15700                                 struct rte_flow_action tag_action;
15701                                 struct mlx5_rte_flow_action_set_tag set_tag;
15702                                 uint32_t next_mtr_idx = 0;
15703
15704                                 mtr = act->conf;
15705                                 next_fm = mlx5_flow_meter_find(priv,
15706                                                         mtr->mtr_id,
15707                                                         &next_mtr_idx);
15708                                 if (!next_fm)
15709                                         return -rte_mtr_error_set(error, EINVAL,
15710                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15711                                                 "Fail to find next meter.");
15712                                 if (next_fm->def_policy)
15713                                         return -rte_mtr_error_set(error, EINVAL,
15714                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15715                                 "Hierarchy only supports termination meter.");
15716                                 next_policy = mlx5_flow_meter_policy_find(dev,
15717                                                 next_fm->policy_id, NULL);
15718                                 MLX5_ASSERT(next_policy);
15719                                 if (next_fm->drop_cnt) {
15720                                         set_tag.id =
15721                                                 (enum modify_reg)
15722                                                 mlx5_flow_get_reg_id(dev,
15723                                                 MLX5_MTR_ID,
15724                                                 0,
15725                                                 (struct rte_flow_error *)error);
15726                                         set_tag.offset = (priv->mtr_reg_share ?
15727                                                 MLX5_MTR_COLOR_BITS : 0);
15728                                         set_tag.length = (priv->mtr_reg_share ?
15729                                                MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
15730                                                MLX5_REG_BITS);
15731                                         set_tag.data = next_mtr_idx;
15732                                         tag_action.type =
15733                                                 (enum rte_flow_action_type)
15734                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
15735                                         tag_action.conf = &set_tag;
15736                                         if (flow_dv_convert_action_set_reg
15737                                                 (mhdr_res, &tag_action,
15738                                                 (struct rte_flow_error *)error))
15739                                                 return -rte_errno;
15740                                         action_flags |=
15741                                                 MLX5_FLOW_ACTION_SET_TAG;
15742                                 }
15743                                 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
15744                                 act_cnt->next_mtr_id = next_fm->meter_id;
15745                                 act_cnt->next_sub_policy = NULL;
15746                                 mtr_policy->is_hierarchy = 1;
15747                                 mtr_policy->dev = next_policy->dev;
15748                                 action_flags |=
15749                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
15750                                 break;
15751                         }
15752                         default:
15753                                 return -rte_mtr_error_set(error, ENOTSUP,
15754                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15755                                           NULL, "action type not supported");
15756                         }
15757                         if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
15758                         /* Create the modify header action if needed. */
15759                                 dev_flow.dv.group = 1;
15760                                 if (flow_dv_modify_hdr_resource_register
15761                                         (dev, mhdr_res, &dev_flow, &flow_err))
15762                                         return -rte_mtr_error_set(error,
15763                                                 ENOTSUP,
15764                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15765                                                 NULL, "cannot register policy "
15766                                                 "set tag action");
15767                                 act_cnt->modify_hdr =
15768                                         dev_flow.handle->dvh.modify_hdr;
15769                         }
15770                 }
15771         }
15772         return 0;
15773 }
15774
15775 /**
15776  * Create the policy actions for all meter domains, lock free,
15777  * (mutex should be acquired by caller).
15778  * Dispatches to the per-domain creation routine.
15779  *
15780  * @param[in] dev
15781  *   Pointer to the Ethernet device structure.
15782  * @param[in] mtr_policy
15783  *   Meter policy struct.
15784  * @param[in] actions
15785  *   Per-color action specifications used to create the meter actions.
15786  * @param[out] error
15787  *   Perform verbose error reporting if not NULL. Initialized in case of
15788  *   error only.
15789  *
15790  * @return
15791  *   0 on success, otherwise negative errno value.
15792  */
15793 static int
15794 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15795                       struct mlx5_flow_meter_policy *mtr_policy,
15796                       const struct rte_flow_action *actions[RTE_COLORS],
15797                       struct rte_mtr_error *error)
15798 {
15799         int ret, i;
15800         uint16_t sub_policy_num;
15801
15802         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15803                 sub_policy_num = (mtr_policy->sub_policy_num >>
15804                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15805                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15806                 if (sub_policy_num) {
15807                         ret = __flow_dv_create_domain_policy_acts(dev,
15808                                 mtr_policy, actions,
15809                                 (enum mlx5_meter_domain)i, error);
15810                         /* Resource cleanup is done at the caller level. */
15811                         if (ret)
15812                                 return ret;
15813                 }
15814         }
15815         return 0;
15816 }
15817
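/*
 * Illustrative sketch (not part of the driver): a per-color action table as
 * an application would pass it to rte_mtr_meter_policy_add(), which is what
 * ultimately feeds the actions[RTE_COLORS] array handled above. The port id,
 * policy id, queue index and helper name are assumptions for the example.
 */
static int
example_add_meter_policy(uint16_t port_id, uint32_t policy_id)
{
        struct rte_flow_action_queue green_queue = { .index = 0 };
        const struct rte_flow_action green_acts[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &green_queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        const struct rte_flow_action red_acts[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_mtr_meter_policy_params params = {
                .actions = {
                        [RTE_COLOR_GREEN] = green_acts,
                        [RTE_COLOR_YELLOW] = NULL, /* No yellow actions. */
                        [RTE_COLOR_RED] = red_acts,
                },
        };
        struct rte_mtr_error error;

        return rte_mtr_meter_policy_add(port_id, policy_id, &params, &error);
}
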
15818 /**
15819  * Query a DV flow rule for its statistics via DevX.
15820  *
15821  * @param[in] dev
15822  *   Pointer to Ethernet device.
15823  * @param[in] cnt_idx
15824  *   Index to the flow counter.
15825  * @param[out] data
15826  *   Data retrieved by the query.
15827  * @param[out] error
15828  *   Perform verbose error reporting if not NULL.
15829  *
15830  * @return
15831  *   0 on success, a negative errno value otherwise and rte_errno is set.
15832  */
15833 int
15834 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15835                     struct rte_flow_error *error)
15836 {
15837         struct mlx5_priv *priv = dev->data->dev_private;
15838         struct rte_flow_query_count *qc = data;
15839
15840         if (!priv->sh->cdev->config.devx)
15841                 return rte_flow_error_set(error, ENOTSUP,
15842                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15843                                           NULL,
15844                                           "counters are not supported");
15845         if (cnt_idx) {
15846                 uint64_t pkts, bytes;
15847                 struct mlx5_flow_counter *cnt;
15848                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15849
15850                 if (err)
15851                         return rte_flow_error_set(error, -err,
15852                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15853                                         NULL, "cannot read counters");
15854                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15855                 qc->hits_set = 1;
15856                 qc->bytes_set = 1;
15857                 qc->hits = pkts - cnt->hits;
15858                 qc->bytes = bytes - cnt->bytes;
15859                 if (qc->reset) {
15860                         cnt->hits = pkts;
15861                         cnt->bytes = bytes;
15862                 }
15863                 return 0;
15864         }
15865         return rte_flow_error_set(error, EINVAL,
15866                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15867                                   NULL,
15868                                   "counters are not available");
15869 }
15870
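/*
 * Illustrative sketch (not part of the driver): reading a flow counter from
 * the application side with rte_flow_query(), which reaches
 * flow_dv_query_count() above for DV flows. The helper name is an
 * assumption for the example.
 */
static int
example_read_flow_counter(uint16_t port_id, struct rte_flow *flow,
                          uint64_t *hits, uint64_t *bytes)
{
        struct rte_flow_query_count qc = { .reset = 0 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_error error;
        int ret;

        ret = rte_flow_query(port_id, flow, &action, &qc, &error);
        if (ret)
                return ret;
        *hits = qc.hits_set ? qc.hits : 0;
        *bytes = qc.bytes_set ? qc.bytes : 0;
        return 0;
}
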
15872 /**
15873  * Query counter's action pointer for a DV flow rule via DevX.
15874  *
15875  * @param[in] dev
15876  *   Pointer to Ethernet device.
15877  * @param[in] cnt_idx
15878  *   Index to the flow counter.
15879  * @param[out] action_ptr
15880  *   Action pointer for counter.
15881  * @param[out] error
15882  *   Perform verbose error reporting if not NULL.
15883  *
15884  * @return
15885  *   0 on success, a negative errno value otherwise and rte_errno is set.
15886  */
15887 int
15888 flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,
15889         void **action_ptr, struct rte_flow_error *error)
15890 {
15891         struct mlx5_priv *priv = dev->data->dev_private;
15892
15893         if (!priv->sh->cdev->config.devx || !action_ptr)
15894                 return rte_flow_error_set(error, ENOTSUP,
15895                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15896                                           NULL,
15897                                           "counters are not supported");
15898
15899         if (cnt_idx) {
15900                 struct mlx5_flow_counter *cnt;
15901                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15902                 if (cnt) {
15903                         *action_ptr = cnt->action;
15904                         return 0;
15905                 }
15906         }
15907         return rte_flow_error_set(error, EINVAL,
15908                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15909                                   NULL,
15910                                   "counters are not available");
15911 }
15912
15913 static int
15914 flow_dv_action_query(struct rte_eth_dev *dev,
15915                      const struct rte_flow_action_handle *handle, void *data,
15916                      struct rte_flow_error *error)
15917 {
15918         struct mlx5_age_param *age_param;
15919         struct rte_flow_query_age *resp;
15920         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15921         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15922         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15923         struct mlx5_priv *priv = dev->data->dev_private;
15924         struct mlx5_aso_ct_action *ct;
15925         uint16_t owner;
15926         uint32_t dev_idx;
15927
15928         switch (type) {
15929         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15930                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
15931                 resp = data;
15932                 resp->aged = __atomic_load_n(&age_param->state,
15933                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
15934                                                                           1 : 0;
15935                 resp->sec_since_last_hit_valid = !resp->aged;
15936                 if (resp->sec_since_last_hit_valid)
15937                         resp->sec_since_last_hit = __atomic_load_n
15938                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15939                 return 0;
15940         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15941                 return flow_dv_query_count(dev, idx, data, error);
15942         case MLX5_INDIRECT_ACTION_TYPE_CT:
15943                 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15944                 if (owner != PORT_ID(priv))
15945                         return rte_flow_error_set(error, EACCES,
15946                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15947                                         NULL,
15948                                         "CT object owned by another port");
15949                 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15950                 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15951                 MLX5_ASSERT(ct);
15952                 if (!ct->refcnt)
15953                         return rte_flow_error_set(error, EFAULT,
15954                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15955                                         NULL,
15956                                         "CT object is inactive");
15957                 ((struct rte_flow_action_conntrack *)data)->peer_port =
15958                                                         ct->peer;
15959                 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
15960                                                         ct->is_original;
15961                 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
15962                         return rte_flow_error_set(error, EIO,
15963                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15964                                         NULL,
15965                                         "Failed to query CT context");
15966                 return 0;
15967         default:
15968                 return rte_flow_error_set(error, ENOTSUP,
15969                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15970                                           "action type query not supported");
15971         }
15972 }
15973
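/*
 * Illustrative sketch (not part of the driver): querying a conntrack
 * indirect action from the application with rte_flow_action_handle_query(),
 * which is dispatched to flow_dv_action_query() above. The helper name is
 * an assumption for the example.
 */
static int
example_query_ct_profile(uint16_t port_id,
                         const struct rte_flow_action_handle *handle)
{
        struct rte_flow_action_conntrack profile;
        struct rte_flow_error error;

        if (rte_flow_action_handle_query(port_id, handle, &profile, &error))
                return -1;
        /* peer_port and is_original_dir were filled from the ASO CT object. */
        return profile.is_original_dir;
}
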
15974 /**
15975  * Query a flow rule AGE action for aging information.
15976  *
15977  * @param[in] dev
15978  *   Pointer to Ethernet device.
15979  * @param[in] flow
15980  *   Pointer to the sub flow.
15981  * @param[out] data
15982  *   Data retrieved by the query.
15983  * @param[out] error
15984  *   Perform verbose error reporting if not NULL.
15985  *
15986  * @return
15987  *   0 on success, a negative errno value otherwise and rte_errno is set.
15988  */
15989 static int
15990 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15991                   void *data, struct rte_flow_error *error)
15992 {
15993         struct rte_flow_query_age *resp = data;
15994         struct mlx5_age_param *age_param;
15995
15996         if (flow->age) {
15997                 struct mlx5_aso_age_action *act =
15998                                      flow_aso_age_get_by_idx(dev, flow->age);
15999
16000                 age_param = &act->age_params;
16001         } else if (flow->counter) {
16002                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
16003
16004                 if (!age_param || !age_param->timeout)
16005                         return rte_flow_error_set
16006                                         (error, EINVAL,
16007                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16008                                          NULL, "cannot read age data");
16009         } else {
16010                 return rte_flow_error_set(error, EINVAL,
16011                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16012                                           NULL, "age data not available");
16013         }
16014         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
16015                                      AGE_TMOUT ? 1 : 0;
16016         resp->sec_since_last_hit_valid = !resp->aged;
16017         if (resp->sec_since_last_hit_valid)
16018                 resp->sec_since_last_hit = __atomic_load_n
16019                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
16020         return 0;
16021 }
16022
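/*
 * Illustrative sketch (not part of the driver): interpreting the AGE query
 * result produced by flow_dv_query_age() above. An aged-out flow reports
 * aged = 1; otherwise sec_since_last_hit is valid. The helper name is an
 * assumption for the example.
 */
static int
example_check_flow_age(uint16_t port_id, struct rte_flow *flow)
{
        struct rte_flow_query_age age = { 0 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_AGE,
        };
        struct rte_flow_error error;

        if (rte_flow_query(port_id, flow, &action, &age, &error))
                return -1;
        if (age.aged)
                return 1; /* Flow already timed out. */
        return age.sec_since_last_hit_valid ? 0 : -1;
}
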
16023 /**
16024  * Query a flow.
16025  *
16026  * @see rte_flow_query()
16027  * @see rte_flow_ops
16028  */
16029 static int
16030 flow_dv_query(struct rte_eth_dev *dev,
16031               struct rte_flow *flow,
16032               const struct rte_flow_action *actions,
16033               void *data,
16034               struct rte_flow_error *error)
16035 {
16036         int ret = -EINVAL;
16037
16038         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
16039                 switch (actions->type) {
16040                 case RTE_FLOW_ACTION_TYPE_VOID:
16041                         break;
16042                 case RTE_FLOW_ACTION_TYPE_COUNT:
16043                         ret = flow_dv_query_count(dev, flow->counter, data,
16044                                                   error);
16045                         break;
16046                 case RTE_FLOW_ACTION_TYPE_AGE:
16047                         ret = flow_dv_query_age(dev, flow, data, error);
16048                         break;
16049                 default:
16050                         return rte_flow_error_set(error, ENOTSUP,
16051                                                   RTE_FLOW_ERROR_TYPE_ACTION,
16052                                                   actions,
16053                                                   "action not supported");
16054                 }
16055         }
16056         return ret;
16057 }
16058
16059 /**
16060  * Destroy the meter table set.
16061  * Lock free, (mutex should be acquired by caller).
16062  *
16063  * @param[in] dev
16064  *   Pointer to Ethernet device.
16065  * @param[in] fm
16066  *   Meter information table.
16067  */
16068 static void
16069 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
16070                         struct mlx5_flow_meter_info *fm)
16071 {
16072         struct mlx5_priv *priv = dev->data->dev_private;
16073         int i;
16074
16075         if (!fm || !priv->sh->config.dv_flow_en)
16076                 return;
16077         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16078                 if (fm->drop_rule[i]) {
16079                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
16080                         fm->drop_rule[i] = NULL;
16081                 }
16082         }
16083 }
16084
16085 static void
16086 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
16087 {
16088         struct mlx5_priv *priv = dev->data->dev_private;
16089         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16090         struct mlx5_flow_tbl_data_entry *tbl;
16091         int i, j;
16092
16093         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16094                 if (mtrmng->def_rule[i]) {
16095                         claim_zero(mlx5_flow_os_destroy_flow
16096                                         (mtrmng->def_rule[i]));
16097                         mtrmng->def_rule[i] = NULL;
16098                 }
16099                 if (mtrmng->def_matcher[i]) {
16100                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
16101                                 struct mlx5_flow_tbl_data_entry, tbl);
16102                         mlx5_list_unregister(tbl->matchers,
16103                                              &mtrmng->def_matcher[i]->entry);
16104                         mtrmng->def_matcher[i] = NULL;
16105                 }
16106                 for (j = 0; j < MLX5_REG_BITS; j++) {
16107                         if (mtrmng->drop_matcher[i][j]) {
16108                                 tbl =
16109                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
16110                                              struct mlx5_flow_tbl_data_entry,
16111                                              tbl);
16112                                 mlx5_list_unregister(tbl->matchers,
16113                                             &mtrmng->drop_matcher[i][j]->entry);
16114                                 mtrmng->drop_matcher[i][j] = NULL;
16115                         }
16116                 }
16117                 if (mtrmng->drop_tbl[i]) {
16118                         flow_dv_tbl_resource_release(MLX5_SH(dev),
16119                                 mtrmng->drop_tbl[i]);
16120                         mtrmng->drop_tbl[i] = NULL;
16121                 }
16122         }
16123 }
16124
16125 /* Number of meter flow actions: count plus jump, or count plus drop. */
16126 #define METER_ACTIONS 2
16127
16128 static void
16129 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
16130                                     enum mlx5_meter_domain domain)
16131 {
16132         struct mlx5_priv *priv = dev->data->dev_private;
16133         struct mlx5_flow_meter_def_policy *def_policy =
16134                         priv->sh->mtrmng->def_policy[domain];
16135
16136         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
16137         mlx5_free(def_policy);
16138         priv->sh->mtrmng->def_policy[domain] = NULL;
16139 }
16140
16141 /**
16142  * Destroy the default policy table set.
16143  *
16144  * @param[in] dev
16145  *   Pointer to Ethernet device.
16146  */
16147 static void
16148 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
16149 {
16150         struct mlx5_priv *priv = dev->data->dev_private;
16151         int i;
16152
16153         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
16154                 if (priv->sh->mtrmng->def_policy[i])
16155                         __flow_dv_destroy_domain_def_policy(dev,
16156                                         (enum mlx5_meter_domain)i);
16157         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
16158 }
16159
16160 static int
16161 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
16162                         uint32_t color_reg_c_idx,
16163                         enum rte_color color, void *matcher_object,
16164                         int actions_n, void *actions,
16165                         bool match_src_port, const struct rte_flow_item *item,
16166                         void **rule, const struct rte_flow_attr *attr)
16167 {
16168         int ret;
16169         struct mlx5_flow_dv_match_params value = {
16170                 .size = sizeof(value.buf),
16171         };
16172         struct mlx5_flow_dv_match_params matcher = {
16173                 .size = sizeof(matcher.buf),
16174         };
16175         struct mlx5_priv *priv = dev->data->dev_private;
16176         uint8_t misc_mask;
16177
16178         if (match_src_port && priv->sh->esw_mode) {
16179                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
16180                                                    value.buf, item, attr)) {
16181                         DRV_LOG(ERR, "Failed to create meter policy%d flow's"
16182                                 " value with port.", color);
16183                         return -1;
16184                 }
16185         }
16186         flow_dv_match_meta_reg(matcher.buf, value.buf,
16187                                (enum modify_reg)color_reg_c_idx,
16188                                rte_col_2_mlx5_col(color), UINT32_MAX);
16189         misc_mask = flow_dv_matcher_enable(value.buf);
16190         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16191         ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value,
16192                                        actions_n, actions, rule);
16193         if (ret) {
16194                 DRV_LOG(ERR, "Failed to create meter policy%d flow.", color);
16195                 return -1;
16196         }
16197         return 0;
16198 }
16199
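/**
 * Register (or reuse) a matcher on the sub policy table, keyed by the
 * color register and optionally by the source port.
 *
 * @return
 *   0 on success, -1 otherwise.
 */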
16200 static int
16201 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
16202                         uint32_t color_reg_c_idx,
16203                         uint16_t priority,
16204                         struct mlx5_flow_meter_sub_policy *sub_policy,
16205                         const struct rte_flow_attr *attr,
16206                         bool match_src_port,
16207                         const struct rte_flow_item *item,
16208                         struct mlx5_flow_dv_matcher **policy_matcher,
16209                         struct rte_flow_error *error)
16210 {
16211         struct mlx5_list_entry *entry;
16212         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
16213         struct mlx5_flow_dv_matcher matcher = {
16214                 .mask = {
16215                         .size = sizeof(matcher.mask.buf),
16216                 },
16217                 .tbl = tbl_rsc,
16218         };
16219         struct mlx5_flow_dv_match_params value = {
16220                 .size = sizeof(value.buf),
16221         };
16222         struct mlx5_flow_cb_ctx ctx = {
16223                 .error = error,
16224                 .data = &matcher,
16225         };
16226         struct mlx5_flow_tbl_data_entry *tbl_data;
16227         struct mlx5_priv *priv = dev->data->dev_private;
16228         const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
16229
16230         if (match_src_port && priv->sh->esw_mode) {
16231                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
16232                                                    value.buf, item, attr)) {
16233                         DRV_LOG(ERR, "Failed to register meter policy %d"
16234                                 " matcher with port.", priority);
16235                         return -1;
16236                 }
16237         }
16238         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
16239         if (priority < RTE_COLOR_RED)
16240                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16241                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
16242         matcher.priority = priority;
16243         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
16244                                     matcher.mask.size);
16245         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16246         if (!entry) {
16247                 DRV_LOG(ERR, "Failed to register meter drop matcher.");
16248                 return -1;
16249         }
16250         *policy_matcher =
16251                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
16252         return 0;
16253 }
16254
16255 /**
16256  * Create the policy rules per domain.
16257  *
16258  * @param[in] dev
16259  *   Pointer to Ethernet device.
16260  * @param[in] sub_policy
16261  *   Pointer to sub policy table.
16262  * @param[in] egress
16263  *   Direction of the table.
16264  * @param[in] transfer
16265  *   E-Switch or NIC flow.
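 * @param[in] match_src_port
 *   Whether to match on the source port.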
16266  * @param[in] acts
16267  *   Pointer to policy action list per color.
16268  *
16269  * @return
16270  *   0 on success, -1 otherwise.
16271  */
16272 static int
16273 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
16274                 struct mlx5_flow_meter_sub_policy *sub_policy,
16275                 uint8_t egress, uint8_t transfer, bool match_src_port,
16276                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
16277 {
16278         struct mlx5_priv *priv = dev->data->dev_private;
16279         struct rte_flow_error flow_err;
16280         uint32_t color_reg_c_idx;
16281         struct rte_flow_attr attr = {
16282                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16283                 .priority = 0,
16284                 .ingress = 0,
16285                 .egress = !!egress,
16286                 .transfer = !!transfer,
16287                 .reserved = 0,
16288         };
16289         int i;
16290         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
16291         struct mlx5_sub_policy_color_rule *color_rule;
16292         bool svport_match;
16293         struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL};
16294
16295         if (ret < 0)
16296                 return -1;
16297         /* Create policy table with POLICY level. */
16298         if (!sub_policy->tbl_rsc)
16299                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
16300                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
16301                                 egress, transfer, false, NULL, 0, 0,
16302                                 sub_policy->idx, &flow_err);
16303         if (!sub_policy->tbl_rsc) {
16304                 DRV_LOG(ERR,
16305                         "Failed to create meter sub policy table.");
16306                 return -1;
16307         }
16308         /* Prepare matchers. */
16309         color_reg_c_idx = ret;
16310         for (i = 0; i < RTE_COLORS; i++) {
16311                 TAILQ_INIT(&sub_policy->color_rules[i]);
16312                 if (!acts[i].actions_n)
16313                         continue;
16314                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16315                                 sizeof(struct mlx5_sub_policy_color_rule),
16316                                 0, SOCKET_ID_ANY);
16317                 if (!color_rule) {
16318                         DRV_LOG(ERR, "No memory to create color rule.");
16319                         goto err_exit;
16320                 }
16321                 tmp_rules[i] = color_rule;
16322                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16323                                   color_rule, next_port);
16324                 color_rule->src_port = priv->representor_id;
16325                 /* The priority value is not used here. */
16326                 attr.priority = i;
16327                 /* Create matchers for colors. */
16328                 svport_match = (i != RTE_COLOR_RED) ? match_src_port : false;
16329                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16330                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
16331                                 &attr, svport_match, NULL,
16332                                 &color_rule->matcher, &flow_err)) {
16333                         DRV_LOG(ERR, "Failed to create color%u matcher.", i);
16334                         goto err_exit;
16335                 }
16336                 /* Create flow, matching color. */
16337                 if (__flow_dv_create_policy_flow(dev,
16338                                 color_reg_c_idx, (enum rte_color)i,
16339                                 color_rule->matcher->matcher_object,
16340                                 acts[i].actions_n, acts[i].dv_actions,
16341                                 svport_match, NULL, &color_rule->rule,
16342                                 &attr)) {
16343                         DRV_LOG(ERR, "Failed to create color%u rule.", i);
16344                         goto err_exit;
16345                 }
16346         }
16347         return 0;
16348 err_exit:
16349         /* All the policy rules will be cleared. */
16350         do {
16351                 color_rule = tmp_rules[i];
16352                 if (color_rule) {
16353                         if (color_rule->rule)
16354                                 mlx5_flow_os_destroy_flow(color_rule->rule);
16355                         if (color_rule->matcher) {
16356                                 struct mlx5_flow_tbl_data_entry *tbl =
16357                                         container_of(color_rule->matcher->tbl,
16358                                                      typeof(*tbl), tbl);
16359                                 mlx5_list_unregister(tbl->matchers,
16360                                                 &color_rule->matcher->entry);
16361                         }
16362                         TAILQ_REMOVE(&sub_policy->color_rules[i],
16363                                      color_rule, next_port);
16364                         mlx5_free(color_rule);
16365                 }
16366         } while (i--);
16367         return -1;
16368 }
16369
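/**
 * Prepare the per-color action lists from the meter policy and create
 * the policy rules for the given domain.
 *
 * @return
 *   0 on success, -1 otherwise.
 */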
16370 static int
16371 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
16372                         struct mlx5_flow_meter_policy *mtr_policy,
16373                         struct mlx5_flow_meter_sub_policy *sub_policy,
16374                         uint32_t domain)
16375 {
16376         struct mlx5_priv *priv = dev->data->dev_private;
16377         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16378         struct mlx5_flow_dv_tag_resource *tag;
16379         struct mlx5_flow_dv_port_id_action_resource *port_action;
16380         struct mlx5_hrxq *hrxq;
16381         struct mlx5_flow_meter_info *next_fm = NULL;
16382         struct mlx5_flow_meter_policy *next_policy;
16383         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16384         struct mlx5_flow_tbl_data_entry *tbl_data;
16385         struct rte_flow_error error;
16386         uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16387         uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16388         bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
16389         bool match_src_port = false;
16390         int i;
16391
16392         /* If RSS or Queue, no previous actions or rules are created. */
16393         for (i = 0; i < RTE_COLORS; i++) {
16394                 acts[i].actions_n = 0;
16395                 if (i == RTE_COLOR_RED) {
16396                         /* Only support drop on red. */
16397                         acts[i].dv_actions[0] =
16398                                 mtr_policy->dr_drop_action[domain];
16399                         acts[i].actions_n = 1;
16400                         continue;
16401                 }
16402                 if (i == RTE_COLOR_GREEN &&
16403                     mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
16404                         struct rte_flow_attr attr = {
16405                                 .transfer = transfer
16406                         };
16407
16408                         next_fm = mlx5_flow_meter_find(priv,
16409                                         mtr_policy->act_cnt[i].next_mtr_id,
16410                                         NULL);
16411                         if (!next_fm) {
16412                                 DRV_LOG(ERR,
16413                                         "Failed to get next hierarchy meter.");
16414                                 goto err_exit;
16415                         }
16416                         if (mlx5_flow_meter_attach(priv, next_fm,
16417                                                    &attr, &error)) {
16418                                 DRV_LOG(ERR, "%s", error.message);
16419                                 next_fm = NULL;
16420                                 goto err_exit;
16421                         }
16422                         /* Meter action must be the first for TX. */
16423                         if (mtr_first) {
16424                                 acts[i].dv_actions[acts[i].actions_n] =
16425                                         next_fm->meter_action;
16426                                 acts[i].actions_n++;
16427                         }
16428                 }
16429                 if (mtr_policy->act_cnt[i].rix_mark) {
16430                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
16431                                         mtr_policy->act_cnt[i].rix_mark);
16432                         if (!tag) {
16433                                 DRV_LOG(ERR, "Failed to find "
16434                                         "mark action for policy.");
16435                                 goto err_exit;
16436                         }
16437                         acts[i].dv_actions[acts[i].actions_n] = tag->action;
16438                         acts[i].actions_n++;
16439                 }
16440                 if (mtr_policy->act_cnt[i].modify_hdr) {
16441                         acts[i].dv_actions[acts[i].actions_n] =
16442                                 mtr_policy->act_cnt[i].modify_hdr->action;
16443                         acts[i].actions_n++;
16444                 }
16445                 if (mtr_policy->act_cnt[i].fate_action) {
16446                         switch (mtr_policy->act_cnt[i].fate_action) {
16447                         case MLX5_FLOW_FATE_PORT_ID:
16448                                 port_action = mlx5_ipool_get
16449                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
16450                                 mtr_policy->act_cnt[i].rix_port_id_action);
16451                                 if (!port_action) {
16452                                         DRV_LOG(ERR, "Failed to find "
16453                                                 "port action for policy.");
16454                                         goto err_exit;
16455                                 }
16456                                 acts[i].dv_actions[acts[i].actions_n] =
16457                                         port_action->action;
16458                                 acts[i].actions_n++;
16459                                 mtr_policy->dev = dev;
16460                                 match_src_port = true;
16461                                 break;
16462                         case MLX5_FLOW_FATE_DROP:
16463                         case MLX5_FLOW_FATE_JUMP:
16464                                 acts[i].dv_actions[acts[i].actions_n] =
16465                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
16466                                 acts[i].actions_n++;
16467                                 break;
16468                         case MLX5_FLOW_FATE_SHARED_RSS:
16469                         case MLX5_FLOW_FATE_QUEUE:
16470                                 hrxq = mlx5_ipool_get
16471                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
16472                                          sub_policy->rix_hrxq[i]);
16473                                 if (!hrxq) {
16474                                         DRV_LOG(ERR, "Failed to find "
16475                                                 "queue action for policy.");
16476                                         goto err_exit;
16477                                 }
16478                                 acts[i].dv_actions[acts[i].actions_n] =
16479                                         hrxq->action;
16480                                 acts[i].actions_n++;
16481                                 break;
16482                         case MLX5_FLOW_FATE_MTR:
16483                                 if (!next_fm) {
16484                                         DRV_LOG(ERR,
16485                                                 "No next hierarchy meter.");
16486                                         goto err_exit;
16487                                 }
16488                                 if (!mtr_first) {
16489                                         acts[i].dv_actions[acts[i].actions_n] =
16490                                                         next_fm->meter_action;
16491                                         acts[i].actions_n++;
16492                                 }
16493                                 if (mtr_policy->act_cnt[i].next_sub_policy) {
16494                                         next_sub_policy =
16495                                         mtr_policy->act_cnt[i].next_sub_policy;
16496                                 } else {
16497                                         next_policy =
16498                                                 mlx5_flow_meter_policy_find(dev,
16499                                                 next_fm->policy_id, NULL);
16500                                         MLX5_ASSERT(next_policy);
16501                                         next_sub_policy =
16502                                         next_policy->sub_policys[domain][0];
16503                                 }
16504                                 tbl_data =
16505                                         container_of(next_sub_policy->tbl_rsc,
16506                                         struct mlx5_flow_tbl_data_entry, tbl);
16507                                 acts[i].dv_actions[acts[i].actions_n++] =
16508                                                         tbl_data->jump.action;
16509                                 if (mtr_policy->act_cnt[i].modify_hdr)
16510                                         match_src_port = !!transfer;
16511                                 break;
16512                         default:
16513                                 /* Queue action, do nothing. */
16514                                 break;
16515                         }
16516                 }
16517         }
16518         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
16519                                 egress, transfer, match_src_port, acts)) {
16520                 DRV_LOG(ERR,
16521                         "Failed to create policy rules per domain.");
16522                 goto err_exit;
16523         }
16524         return 0;
16525 err_exit:
16526         if (next_fm)
16527                 mlx5_flow_meter_detach(priv, next_fm);
16528         return -1;
16529 }
16530
16531 /**
16532  * Create the policy rules.
16533  *
16534  * @param[in] dev
16535  *   Pointer to Ethernet device.
16536  * @param[in,out] mtr_policy
16537  *   Pointer to meter policy table.
16538  *
16539  * @return
16540  *   0 on success, -1 otherwise.
16541  */
16542 static int
16543 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
16544                              struct mlx5_flow_meter_policy *mtr_policy)
16545 {
16546         int i;
16547         uint16_t sub_policy_num;
16548
16549         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
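                /*
                 * sub_policy_num packs one counter per meter domain,
                 * each MLX5_MTR_SUB_POLICY_NUM_SHIFT bits wide, e.g.
                 * (assuming a shift of 3): bits [2:0] ingress,
                 * bits [5:3] egress, bits [8:6] transfer.
                 */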
16550                 sub_policy_num = (mtr_policy->sub_policy_num >>
16551                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16552                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16553                 if (!sub_policy_num)
16554                         continue;
16555                 /* Prepare actions list and create policy rules. */
16556                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16557                         mtr_policy->sub_policys[i][0], i)) {
16558                         DRV_LOG(ERR, "Failed to create policy action "
16559                                 "list per domain.");
16560                         return -1;
16561                 }
16562         }
16563         return 0;
16564 }
16565
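/**
 * Create the default policy for one meter domain: GREEN and YELLOW
 * jump to the meter suffix table, RED jumps to the drop table.
 *
 * @return
 *   0 on success, -1 otherwise.
 */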
16566 static int
16567 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
16568 {
16569         struct mlx5_priv *priv = dev->data->dev_private;
16570         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16571         struct mlx5_flow_meter_def_policy *def_policy;
16572         struct mlx5_flow_tbl_resource *jump_tbl;
16573         struct mlx5_flow_tbl_data_entry *tbl_data;
16574         uint8_t egress, transfer;
16575         struct rte_flow_error error;
16576         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16577         int ret;
16578
16579         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16580         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16581         def_policy = mtrmng->def_policy[domain];
16582         if (!def_policy) {
16583                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
16584                         sizeof(struct mlx5_flow_meter_def_policy),
16585                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
16586                 if (!def_policy) {
16587                         DRV_LOG(ERR, "Failed to alloc default policy table.");
16588                         goto def_policy_error;
16589                 }
16590                 mtrmng->def_policy[domain] = def_policy;
16591                 /* Create the meter suffix table with METER level and SUFFIX table ID. */
16592                 jump_tbl = flow_dv_tbl_resource_get(dev,
16593                                 MLX5_FLOW_TABLE_LEVEL_METER,
16594                                 egress, transfer, false, NULL, 0,
16595                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16596                 if (!jump_tbl) {
16597                         DRV_LOG(ERR,
16598                                 "Failed to create meter suffix table.");
16599                         goto def_policy_error;
16600                 }
16601                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
16602                 tbl_data = container_of(jump_tbl,
16603                                         struct mlx5_flow_tbl_data_entry, tbl);
16604                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
16605                                                 tbl_data->jump.action;
16606                 acts[RTE_COLOR_GREEN].dv_actions[0] = tbl_data->jump.action;
16607                 acts[RTE_COLOR_GREEN].actions_n = 1;
16608                 /*
16609                  * YELLOW has the same default policy as GREEN does.
16610                  * G & Y share the same table and action. Getting the
16611                  * table resource a second time only increments its
16612                  * reference count for the release stage.
16613                  */
16614                 jump_tbl = flow_dv_tbl_resource_get(dev,
16615                                 MLX5_FLOW_TABLE_LEVEL_METER,
16616                                 egress, transfer, false, NULL, 0,
16617                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16618                 if (!jump_tbl) {
16619                         DRV_LOG(ERR,
16620                                 "Failed to get meter suffix table.");
16621                         goto def_policy_error;
16622                 }
16623                 def_policy->sub_policy.jump_tbl[RTE_COLOR_YELLOW] = jump_tbl;
16624                 tbl_data = container_of(jump_tbl,
16625                                         struct mlx5_flow_tbl_data_entry, tbl);
16626                 def_policy->dr_jump_action[RTE_COLOR_YELLOW] =
16627                                                 tbl_data->jump.action;
16628                 acts[RTE_COLOR_YELLOW].dv_actions[0] = tbl_data->jump.action;
16629                 acts[RTE_COLOR_YELLOW].actions_n = 1;
16630                 /* Create jump action to the drop table. */
16631                 if (!mtrmng->drop_tbl[domain]) {
16632                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
16633                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
16634                                  egress, transfer, false, NULL, 0,
16635                                  0, MLX5_MTR_TABLE_ID_DROP, &error);
16636                         if (!mtrmng->drop_tbl[domain]) {
16637                                 DRV_LOG(ERR, "Failed to create meter "
16638                                         "drop table for default policy.");
16639                                 goto def_policy_error;
16640                         }
16641                 }
16642                 /* All RED packets jump to the unique drop table. */
16643                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16644                                         struct mlx5_flow_tbl_data_entry, tbl);
16645                 def_policy->dr_jump_action[RTE_COLOR_RED] =
16646                                                 tbl_data->jump.action;
16647                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
16648                 acts[RTE_COLOR_RED].actions_n = 1;
16649                 /* Create default policy rules. */
16650                 ret = __flow_dv_create_domain_policy_rules(dev,
16651                                         &def_policy->sub_policy,
16652                                         egress, transfer, false, acts);
16653                 if (ret) {
16654                         DRV_LOG(ERR, "Failed to create default policy rules.");
16655                         goto def_policy_error;
16656                 }
16657         }
16658         return 0;
16659 def_policy_error:
16660         __flow_dv_destroy_domain_def_policy(dev,
16661                                             (enum mlx5_meter_domain)domain);
16662         return -1;
16663 }
16664
16665 /**
16666  * Create the default policy table set.
16667  *
16668  * @param[in] dev
16669  *   Pointer to Ethernet device.
16670  * @return
16671  *   0 on success, -1 otherwise.
16672  */
16673 static int
16674 flow_dv_create_def_policy(struct rte_eth_dev *dev)
16675 {
16676         struct mlx5_priv *priv = dev->data->dev_private;
16677         int i;
16678
16679         /* Non-termination policy table. */
16680         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16681                 if (!priv->sh->config.dv_esw_en &&
16682                     i == MLX5_MTR_DOMAIN_TRANSFER)
16683                         continue;
16684                 if (__flow_dv_create_domain_def_policy(dev, i)) {
16685                         DRV_LOG(ERR, "Failed to create default policy.");
16686                         /* Rollback the created default policies for others. */
16687                         flow_dv_destroy_def_policy(dev);
16688                         return -1;
16689                 }
16690         }
16691         return 0;
16692 }
16693
16694 /**
16695  * Create the needed meter tables.
16696  * Lock free; the mutex should be acquired by the caller.
16697  *
16698  * @param[in] dev
16699  *   Pointer to Ethernet device.
16700  * @param[in] fm
16701  *   Meter information table.
16702  * @param[in] mtr_idx
16703  *   Meter index.
16704  * @param[in] domain_bitmap
16705  *   Domain bitmap.
16706  * @return
16707  *   0 on success, -1 otherwise.
16708  */
16709 static int
16710 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
16711                         struct mlx5_flow_meter_info *fm,
16712                         uint32_t mtr_idx,
16713                         uint8_t domain_bitmap)
16714 {
16715         struct mlx5_priv *priv = dev->data->dev_private;
16716         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16717         struct rte_flow_error error;
16718         struct mlx5_flow_tbl_data_entry *tbl_data;
16719         uint8_t egress, transfer;
16720         void *actions[METER_ACTIONS];
16721         int domain, ret, i;
16722         struct mlx5_flow_counter *cnt;
16723         struct mlx5_flow_dv_match_params value = {
16724                 .size = sizeof(value.buf),
16725         };
16726         struct mlx5_flow_dv_match_params matcher_para = {
16727                 .size = sizeof(matcher_para.buf),
16728         };
16729         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
16730                                                      0, &error);
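        /*
         * When the color and the meter ID share one register
         * (mtr_reg_share), the meter ID bits sit above the color
         * bits, hence the offset below.
         */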
16731         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
16732         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
16733         struct mlx5_list_entry *entry;
16734         struct mlx5_flow_dv_matcher matcher = {
16735                 .mask = {
16736                         .size = sizeof(matcher.mask.buf),
16737                 },
16738         };
16739         struct mlx5_flow_dv_matcher *drop_matcher;
16740         struct mlx5_flow_cb_ctx ctx = {
16741                 .error = &error,
16742                 .data = &matcher,
16743         };
16744         uint8_t misc_mask;
16745
16746         if (!priv->mtr_en || mtr_id_reg_c < 0) {
16747                 rte_errno = ENOTSUP;
16748                 return -1;
16749         }
16750         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
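                /*
                 * Skip this domain when its default rule already exists
                 * and the meter needs no drop counter rule.
                 */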
16751                 if (!(domain_bitmap & (1 << domain)) ||
16752                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
16753                         continue;
16754                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16755                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16756                 /* Create the drop table at METER level with the DROP table ID. */
16757                 if (!mtrmng->drop_tbl[domain]) {
16758                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
16759                                         MLX5_FLOW_TABLE_LEVEL_METER,
16760                                         egress, transfer, false, NULL, 0,
16761                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
16762                         if (!mtrmng->drop_tbl[domain]) {
16763                                 DRV_LOG(ERR, "Failed to create meter drop table.");
16764                                 goto policy_error;
16765                         }
16766                 }
16767                 /* Create default matcher in drop table. */
16768                 matcher.tbl = mtrmng->drop_tbl[domain];
16769                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16770                                 struct mlx5_flow_tbl_data_entry, tbl);
16771                 if (!mtrmng->def_matcher[domain]) {
16772                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16773                                        (enum modify_reg)mtr_id_reg_c,
16774                                        0, 0);
16775                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
16776                         matcher.crc = rte_raw_cksum
16777                                         ((const void *)matcher.mask.buf,
16778                                         matcher.mask.size);
16779                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16780                         if (!entry) {
16781                                 DRV_LOG(ERR, "Failed to register meter "
16782                                         "drop default matcher.");
16783                                 goto policy_error;
16784                         }
16785                         mtrmng->def_matcher[domain] = container_of(entry,
16786                                         struct mlx5_flow_dv_matcher, entry);
16787                 }
16788                 /* Create default rule in drop table. */
16789                 if (!mtrmng->def_rule[domain]) {
16790                         i = 0;
16791                         actions[i++] = priv->sh->dr_drop_action;
16792                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16793                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
16794                         misc_mask = flow_dv_matcher_enable(value.buf);
16795                         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16796                         ret = mlx5_flow_os_create_flow
16797                                 (mtrmng->def_matcher[domain]->matcher_object,
16798                                 (void *)&value, i, actions,
16799                                 &mtrmng->def_rule[domain]);
16800                         if (ret) {
16801                                 DRV_LOG(ERR, "Failed to create meter "
16802                                         "default drop rule for drop table.");
16803                                 goto policy_error;
16804                         }
16805                 }
16806                 if (!fm->drop_cnt)
16807                         continue;
16808                 MLX5_ASSERT(mtrmng->max_mtr_bits);
16809                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
16810                         /* Create matchers for Drop. */
16811                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16812                                         (enum modify_reg)mtr_id_reg_c, 0,
16813                                         (mtr_id_mask << mtr_id_offset));
16814                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
16815                         matcher.crc = rte_raw_cksum
16816                                         ((const void *)matcher.mask.buf,
16817                                         matcher.mask.size);
16818                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16819                         if (!entry) {
16820                                 DRV_LOG(ERR,
16821                                         "Failed to register meter drop matcher.");
16822                                 goto policy_error;
16823                         }
16824                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
16825                                 container_of(entry, struct mlx5_flow_dv_matcher,
16826                                              entry);
16827                 }
16828                 drop_matcher =
16829                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
16830                 /* Create drop rule, matching meter_id only. */
16831                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16832                                 (enum modify_reg)mtr_id_reg_c,
16833                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
16834                 i = 0;
16835                 cnt = flow_dv_counter_get_by_idx(dev,
16836                                         fm->drop_cnt, NULL);
16837                 actions[i++] = cnt->action;
16838                 actions[i++] = priv->sh->dr_drop_action;
16839                 misc_mask = flow_dv_matcher_enable(value.buf);
16840                 __flow_dv_adjust_buf_size(&value.size, misc_mask);
16841                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
16842                                                (void *)&value, i, actions,
16843                                                &fm->drop_rule[domain]);
16844                 if (ret) {
16845                         DRV_LOG(ERR, "Failed to create meter "
16846                                 "drop rule for drop table.");
16847                         goto policy_error;
16848                 }
16849         }
16850         return 0;
16851 policy_error:
16852         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16853                 if (fm->drop_rule[i]) {
16854                         claim_zero(mlx5_flow_os_destroy_flow
16855                                 (fm->drop_rule[i]));
16856                         fm->drop_rule[i] = NULL;
16857                 }
16858         }
16859         return -1;
16860 }
16861
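/**
 * Get or create an ingress sub policy whose per-color queues match
 * the given RSS descriptors; *is_reuse tells whether an existing
 * sub policy was returned.
 *
 * @return
 *   Pointer to the sub policy, NULL otherwise.
 */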
16862 static struct mlx5_flow_meter_sub_policy *
16863 __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
16864                 struct mlx5_flow_meter_policy *mtr_policy,
16865                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
16866                 struct mlx5_flow_meter_sub_policy *next_sub_policy,
16867                 bool *is_reuse)
16868 {
16869         struct mlx5_priv *priv = dev->data->dev_private;
16870         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16871         uint32_t sub_policy_idx = 0;
16872         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
16873         uint32_t i, j;
16874         struct mlx5_hrxq *hrxq;
16875         struct mlx5_flow_handle dh;
16876         struct mlx5_meter_policy_action_container *act_cnt;
16877         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16878         uint16_t sub_policy_num;
16879         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
16880
16881         MLX5_ASSERT(wks);
16882         rte_spinlock_lock(&mtr_policy->sl);
16883         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16884                 if (!rss_desc[i])
16885                         continue;
16886                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
16887                 if (!hrxq_idx[i]) {
16888                         rte_spinlock_unlock(&mtr_policy->sl);
16889                         return NULL;
16890                 }
16891         }
16892         sub_policy_num = (mtr_policy->sub_policy_num >>
16893                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16894                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16895         for (j = 0; j < sub_policy_num; j++) {
16896                 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16897                         if (rss_desc[i] &&
16898                             hrxq_idx[i] !=
16899                             mtr_policy->sub_policys[domain][j]->rix_hrxq[i])
16900                                 break;
16901                 }
16902                 if (i >= MLX5_MTR_RTE_COLORS) {
16903                         /*
16904                          * Found the sub policy table with
16905                          * the same queue per color.
16906                          */
16907                         rte_spinlock_unlock(&mtr_policy->sl);
16908                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16909                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16910                         *is_reuse = true;
16911                         return mtr_policy->sub_policys[domain][j];
16912                 }
16913         }
16914         /* Create sub policy. */
16915         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
16916                 /* Reuse the first pre-allocated sub_policy. */
16917                 sub_policy = mtr_policy->sub_policys[domain][0];
16918                 sub_policy_idx = sub_policy->idx;
16919         } else {
16920                 sub_policy = mlx5_ipool_zmalloc
16921                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16922                                  &sub_policy_idx);
16923                 if (!sub_policy ||
16924                     sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
16925                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16926                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16927                         goto rss_sub_policy_error;
16928                 }
16929                 sub_policy->idx = sub_policy_idx;
16930                 sub_policy->main_policy = mtr_policy;
16931         }
16932         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16933                 if (!rss_desc[i])
16934                         continue;
16935                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
16936                 if (mtr_policy->is_hierarchy) {
16937                         act_cnt = &mtr_policy->act_cnt[i];
16938                         act_cnt->next_sub_policy = next_sub_policy;
16939                         mlx5_hrxq_release(dev, hrxq_idx[i]);
16940                 } else {
16941                         /*
16942                          * Overwrite the last action: a Queue
16943                          * action replaces the RSS action.
16944                          */
16945                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
16946                                               hrxq_idx[i]);
16947                         if (!hrxq) {
16948                                 DRV_LOG(ERR, "Failed to get policy hrxq.");
16949                                 goto rss_sub_policy_error;
16950                         }
16951                         act_cnt = &mtr_policy->act_cnt[i];
16952                         if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16953                                 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16954                                 if (act_cnt->rix_mark)
16955                                         wks->mark = 1;
16956                                 dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16957                                 dh.rix_hrxq = hrxq_idx[i];
16958                                 flow_drv_rxq_flags_set(dev, &dh);
16959                         }
16960                 }
16961         }
16962         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16963                                                sub_policy, domain)) {
16964                 DRV_LOG(ERR, "Failed to create policy "
16965                         "rules for ingress domain.");
16966                 goto rss_sub_policy_error;
16967         }
16968         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16969                 i = (mtr_policy->sub_policy_num >>
16970                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16971                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16972                 if (i >= MLX5_MTR_RSS_MAX_SUB_POLICY) {
16973                         DRV_LOG(ERR, "No free sub-policy slot.");
16974                         goto rss_sub_policy_error;
16975                 }
16976                 mtr_policy->sub_policys[domain][i] = sub_policy;
16977                 i++;
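                /* Write the grown count back to this domain's packed field. */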
16978                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16979                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16980                 mtr_policy->sub_policy_num |=
16981                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16982                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16983         }
16984         rte_spinlock_unlock(&mtr_policy->sl);
16985         *is_reuse = false;
16986         return sub_policy;
16987 rss_sub_policy_error:
16988         if (sub_policy) {
16989                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16990                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16991                         i = (mtr_policy->sub_policy_num >>
16992                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16993                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16994                         mtr_policy->sub_policys[domain][i] = NULL;
16995                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16996                                         sub_policy->idx);
16997                 }
16998         }
16999         rte_spinlock_unlock(&mtr_policy->sl);
17000         return NULL;
17001 }
17002
17003 /**
17004  * Find the policy table for prefix table with RSS.
17005  *
17006  * @param[in] dev
17007  *   Pointer to Ethernet device.
17008  * @param[in] mtr_policy
17009  *   Pointer to meter policy table.
17010  * @param[in] rss_desc
17011  *   Array of RSS descriptor pointers, one per color.
17012  * @return
17013  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
17014  */
17015 static struct mlx5_flow_meter_sub_policy *
17016 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
17017                 struct mlx5_flow_meter_policy *mtr_policy,
17018                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
17019 {
17020         struct mlx5_priv *priv = dev->data->dev_private;
17021         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17022         struct mlx5_flow_meter_info *next_fm;
17023         struct mlx5_flow_meter_policy *next_policy;
17024         struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
17025         struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
17026         struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
17027         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17028         bool reuse_sub_policy;
17029         uint32_t i = 0;
17030         uint32_t j = 0;
17031
17032         while (true) {
17033                 /* Iterate the hierarchy to collect all of its policies. */
17034                 policies[i++] = mtr_policy;
17035                 if (!mtr_policy->is_hierarchy)
17036                         break;
17037                 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
17038                         DRV_LOG(ERR, "Exceeded the max meter number in hierarchy.");
17039                         return NULL;
17040                 }
17041                 next_fm = mlx5_flow_meter_find(priv,
17042                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
17043                 if (!next_fm) {
17044                         DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
17045                         return NULL;
17046                 }
17047                 next_policy =
17048                         mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
17049                                                     NULL);
17050                 MLX5_ASSERT(next_policy);
17051                 mtr_policy = next_policy;
17052         }
17053         while (i) {
17054                 /*
17055                  * From the last policy to the first one in the
17056                  * hierarchy, create or get the sub policy for each.
17057                  */
17058                 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
17059                                                         policies[--i],
17060                                                         rss_desc,
17061                                                         next_sub_policy,
17062                                                         &reuse_sub_policy);
17063                 if (!sub_policy) {
17064                         DRV_LOG(ERR, "Failed to get the sub policy.");
17065                         goto err_exit;
17066                 }
17067                 if (!reuse_sub_policy)
17068                         sub_policies[j++] = sub_policy;
17069                 next_sub_policy = sub_policy;
17070         }
17071         return sub_policy;
17072 err_exit:
17073         while (j) {
17074                 uint16_t sub_policy_num;
17075
17076                 sub_policy = sub_policies[--j];
17077                 mtr_policy = sub_policy->main_policy;
17078                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
17079                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
17080                         sub_policy_num = (mtr_policy->sub_policy_num >>
17081                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17082                                 MLX5_MTR_SUB_POLICY_NUM_MASK;
17083                         mtr_policy->sub_policys[domain][sub_policy_num - 1] =
17084                                 NULL;
17085                         sub_policy_num--;
17086                         mtr_policy->sub_policy_num &=
17087                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17088                                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17089                         mtr_policy->sub_policy_num |=
17090                                 (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17091                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17092                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17093                                         sub_policy->idx);
17094                 }
17095         }
17096         return NULL;
17097 }
17098
17099 /**
17100  * Create the sub policy tag rule for all meters in hierarchy.
17101  *
17102  * @param[in] dev
17103  *   Pointer to Ethernet device.
17104  * @param[in] fm
17105  *   Meter information table.
17106  * @param[in] src_port
17107  *   The src port this extra rule should use.
17108  * @param[in] item
17109  *   The src port match item.
17110  * @param[out] error
17111  *   Perform verbose error reporting if not NULL.
17112  * @return
17113  *   0 on success, a negative errno value otherwise and rte_errno is set.
17114  */
17115 static int
17116 flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
17117                                 struct mlx5_flow_meter_info *fm,
17118                                 int32_t src_port,
17119                                 const struct rte_flow_item *item,
17120                                 struct rte_flow_error *error)
17121 {
17122         struct mlx5_priv *priv = dev->data->dev_private;
17123         struct mlx5_flow_meter_policy *mtr_policy;
17124         struct mlx5_flow_meter_sub_policy *sub_policy;
17125         struct mlx5_flow_meter_info *next_fm = NULL;
17126         struct mlx5_flow_meter_policy *next_policy;
17127         struct mlx5_flow_meter_sub_policy *next_sub_policy;
17128         struct mlx5_flow_tbl_data_entry *tbl_data;
17129         struct mlx5_sub_policy_color_rule *color_rule = NULL;
17130         struct mlx5_meter_policy_acts acts;
17131         uint32_t color_reg_c_idx;
17132         bool mtr_first = src_port != UINT16_MAX;
17133         struct rte_flow_attr attr = {
17134                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
17135                 .priority = 0,
17136                 .ingress = 0,
17137                 .egress = 0,
17138                 .transfer = 1,
17139                 .reserved = 0,
17140         };
17141         uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
17142         int i;
17143
17144         mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17145         MLX5_ASSERT(mtr_policy);
17146         if (!mtr_policy->is_hierarchy)
17147                 return 0;
17148         next_fm = mlx5_flow_meter_find(priv,
17149                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
17150         if (!next_fm) {
17151                 return rte_flow_error_set(error, EINVAL,
17152                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
17153                                 "Failed to find next meter in hierarchy.");
17154         }
17155         if (!next_fm->drop_cnt)
17156                 goto exit;
17157         color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
17158         sub_policy = mtr_policy->sub_policys[domain][0];
17159         for (i = 0; i < RTE_COLORS; i++) {
17160                 bool rule_exist = false;
17161                 struct mlx5_meter_policy_action_container *act_cnt;
17162
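                /* Only the GREEN color is chained to the next meter here. */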
17163                 if (i >= RTE_COLOR_YELLOW)
17164                         break;
17165                 TAILQ_FOREACH(color_rule,
17166                               &sub_policy->color_rules[i], next_port)
17167                         if (color_rule->src_port == src_port) {
17168                                 rule_exist = true;
17169                                 break;
17170                         }
17171                 if (rule_exist)
17172                         continue;
17173                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
17174                                 sizeof(struct mlx5_sub_policy_color_rule),
17175                                 0, SOCKET_ID_ANY);
17176                 if (!color_rule)
17177                         return rte_flow_error_set(error, ENOMEM,
17178                                 RTE_FLOW_ERROR_TYPE_ACTION,
17179                                 NULL, "No memory to create tag color rule.");
17180                 color_rule->src_port = src_port;
17181                 attr.priority = i;
17182                 next_policy = mlx5_flow_meter_policy_find(dev,
17183                                                 next_fm->policy_id, NULL);
17184                 MLX5_ASSERT(next_policy);
17185                 next_sub_policy = next_policy->sub_policys[domain][0];
17186                 tbl_data = container_of(next_sub_policy->tbl_rsc,
17187                                         struct mlx5_flow_tbl_data_entry, tbl);
17188                 act_cnt = &mtr_policy->act_cnt[i];
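                /* Put the meter action first when matching a specific source port. */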
17189                 if (mtr_first) {
17190                         acts.dv_actions[0] = next_fm->meter_action;
17191                         acts.dv_actions[1] = act_cnt->modify_hdr->action;
17192                 } else {
17193                         acts.dv_actions[0] = act_cnt->modify_hdr->action;
17194                         acts.dv_actions[1] = next_fm->meter_action;
17195                 }
17196                 acts.dv_actions[2] = tbl_data->jump.action;
17197                 acts.actions_n = 3;
17198                 if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
17199                         next_fm = NULL;
17200                         goto err_exit;
17201                 }
17202                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
17203                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
17204                                 &attr, true, item,
17205                                 &color_rule->matcher, error)) {
17206                         rte_flow_error_set(error, errno,
17207                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17208                                 "Failed to create hierarchy meter matcher.");
17209                         goto err_exit;
17210                 }
17211                 if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
17212                                         (enum rte_color)i,
17213                                         color_rule->matcher->matcher_object,
17214                                         acts.actions_n, acts.dv_actions,
17215                                         true, item,
17216                                         &color_rule->rule, &attr)) {
17217                         rte_flow_error_set(error, errno,
17218                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17219                                 "Failed to create hierarchy meter rule.");
17220                         goto err_exit;
17221                 }
17222                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
17223                                   color_rule, next_port);
17224         }
17225 exit:
17226         /*
17227          * Recursively iterate all meters in the hierarchy and
17228          * create the needed rules.
17229          */
17230         return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
17231                                                 src_port, item, error);
17232 err_exit:
17233         if (color_rule) {
17234                 if (color_rule->rule)
17235                         mlx5_flow_os_destroy_flow(color_rule->rule);
17236                 if (color_rule->matcher) {
17237                         struct mlx5_flow_tbl_data_entry *tbl =
17238                                 container_of(color_rule->matcher->tbl,
17239                                                 typeof(*tbl), tbl);
17240                         mlx5_list_unregister(tbl->matchers,
17241                                                 &color_rule->matcher->entry);
17242                 }
17243                 mlx5_free(color_rule);
17244         }
17245         if (next_fm)
17246                 mlx5_flow_meter_detach(priv, next_fm);
17247         return -rte_errno;
17248 }
17249
17250 /**
17251  * Destroy the sub policy table with RX queue.
17252  *
17253  * @param[in] dev
17254  *   Pointer to Ethernet device.
17255  * @param[in] mtr_policy
17256  *   Pointer to meter policy table.
17257  */
17258 static void
17259 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
17260                                     struct mlx5_flow_meter_policy *mtr_policy)
17261 {
17262         struct mlx5_priv *priv = dev->data->dev_private;
17263         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17264         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17265         uint32_t i, j;
17266         uint16_t sub_policy_num, new_policy_num;
17267
17268         rte_spinlock_lock(&mtr_policy->sl);
17269         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17270                 switch (mtr_policy->act_cnt[i].fate_action) {
17271                 case MLX5_FLOW_FATE_SHARED_RSS:
17272                         sub_policy_num = (mtr_policy->sub_policy_num >>
17273                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17274                         MLX5_MTR_SUB_POLICY_NUM_MASK;
17275                         new_policy_num = sub_policy_num;
17276                         for (j = 0; j < sub_policy_num; j++) {
17277                                 sub_policy =
17278                                         mtr_policy->sub_policys[domain][j];
17279                                 if (sub_policy) {
17280                                         __flow_dv_destroy_sub_policy_rules(dev,
17281                                                 sub_policy);
17282                                         if (sub_policy !=
17283                                             mtr_policy->sub_policys[domain][0]) {
17284                                                 mtr_policy->sub_policys[domain][j] =
17285                                                         NULL;
17286                                                 mlx5_ipool_free
17287                                                         (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17288                                                          sub_policy->idx);
17289                                                 new_policy_num--;
17290                                         }
17291                                 }
17292                         }
17293                         if (new_policy_num != sub_policy_num) {
17294                                 mtr_policy->sub_policy_num &=
17295                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17296                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17297                                 mtr_policy->sub_policy_num |=
17298                                 (new_policy_num &
17299                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17300                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17301                         }
17302                         break;
17303                 case MLX5_FLOW_FATE_QUEUE:
17304                         sub_policy = mtr_policy->sub_policys[domain][0];
17305                         __flow_dv_destroy_sub_policy_rules(dev,
17306                                                            sub_policy);
17307                         break;
17308                 default:
17309                         /* Other fate actions have no RX queue, nothing to do. */
17310                         break;
17311                 }
17312         }
17313         rte_spinlock_unlock(&mtr_policy->sl);
17314 }
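
/*
 * The "sub_policy_num" word manipulated above packs one per-domain count,
 * MLX5_MTR_SUB_POLICY_NUM_SHIFT bits wide, per meter domain. A minimal
 * sketch of the decode/encode steps; the helper names are hypothetical
 * and not part of the driver:
 */
static inline uint16_t
example_sub_policy_num_get(uint32_t packed, uint32_t domain)
{
        /* Shift the requested domain's field down and mask it out. */
        return (packed >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
               MLX5_MTR_SUB_POLICY_NUM_MASK;
}

static inline uint32_t
example_sub_policy_num_set(uint32_t packed, uint32_t domain, uint16_t num)
{
        /* Clear the old field, then OR in the new count. */
        packed &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
                    (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
        packed |= ((uint32_t)num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
                  (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
        return packed;
}
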
17315 /**
17316  * Check whether the DR drop action is supported on the root table or not.
17317  *
17318  * Create a simple flow with DR drop action on root table to validate
17319  * if DR drop action on root table is supported or not.
17320  *
17321  * @param[in] dev
17322  *   Pointer to rte_eth_dev structure.
17323  *
17324  * @return
17325  *   0 on success, a negative errno value otherwise and rte_errno is set.
17326  */
17327 int
17328 mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)
17329 {
17330         struct mlx5_priv *priv = dev->data->dev_private;
17331         struct mlx5_dev_ctx_shared *sh = priv->sh;
17332         struct mlx5_flow_dv_match_params mask = {
17333                 .size = sizeof(mask.buf),
17334         };
17335         struct mlx5_flow_dv_match_params value = {
17336                 .size = sizeof(value.buf),
17337         };
17338         struct mlx5dv_flow_matcher_attr dv_attr = {
17339                 .type = IBV_FLOW_ATTR_NORMAL,
17340                 .priority = 0,
17341                 .match_criteria_enable = 0,
17342                 .match_mask = (void *)&mask,
17343         };
17344         struct mlx5_flow_tbl_resource *tbl = NULL;
17345         void *matcher = NULL;
17346         void *flow = NULL;
17347         int ret = -1;
17348
17349         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
17350                                         0, 0, 0, NULL);
17351         if (!tbl)
17352                 goto err;
17353         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17354         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17355         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17356                                                tbl->obj, &matcher);
17357         if (ret)
17358                 goto err;
17359         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17360         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17361                                        &sh->dr_drop_action, &flow);
17362 err:
17363         /*
17364          * If the DR drop action is not supported on the root table, flow
17365          * creation fails with EOPNOTSUPP or EPROTONOSUPPORT.
17366          */
17367         if (!flow) {
17368                 if (matcher &&
17369                     (errno == EPROTONOSUPPORT || errno == EOPNOTSUPP))
17370                         DRV_LOG(INFO, "DR drop action is not supported in root table.");
17371                 else
17372                         DRV_LOG(ERR, "Unexpected error in DR drop action support detection.");
17373                 ret = -1;
17374         } else {
17375                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17376         }
17377         if (matcher)
17378                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17379         if (tbl)
17380                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17381         return ret;
17382 }
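
/*
 * Usage sketch for the probe above: run it once while initializing the
 * shared context and cache the result. The "root_drop_en" output below is
 * hypothetical and only illustrates caching the capability bit.
 */
static void
example_cache_dr_drop_support(struct rte_eth_dev *dev, bool *root_drop_en)
{
        /* A zero return means the DR drop action works on the root table. */
        *root_drop_en = mlx5_flow_discover_dr_action_support(dev) == 0;
}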
17383
17384 /**
17385  * Validate the batch counter support in root table.
17386  *
17387  * Create a simple flow with invalid counter and drop action on root table to
17388  * validate if batch counter with offset on root table is supported or not.
17389  *
17390  * @param[in] dev
17391  *   Pointer to rte_eth_dev structure.
17392  *
17393  * @return
17394  *   0 on success, a negative errno value otherwise and rte_errno is set.
17395  */
17396 int
17397 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
17398 {
17399         struct mlx5_priv *priv = dev->data->dev_private;
17400         struct mlx5_dev_ctx_shared *sh = priv->sh;
17401         struct mlx5_flow_dv_match_params mask = {
17402                 .size = sizeof(mask.buf),
17403         };
17404         struct mlx5_flow_dv_match_params value = {
17405                 .size = sizeof(value.buf),
17406         };
17407         struct mlx5dv_flow_matcher_attr dv_attr = {
17408                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
17409                 .priority = 0,
17410                 .match_criteria_enable = 0,
17411                 .match_mask = (void *)&mask,
17412         };
17413         void *actions[2] = { 0 };
17414         struct mlx5_flow_tbl_resource *tbl = NULL;
17415         struct mlx5_devx_obj *dcs = NULL;
17416         void *matcher = NULL;
17417         void *flow = NULL;
17418         int ret = -1;
17419
17420         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
17421                                         0, 0, 0, NULL);
17422         if (!tbl)
17423                 goto err;
17424         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
17425         if (!dcs)
17426                 goto err;
17427         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
17428                                                     &actions[0]);
17429         if (ret)
17430                 goto err;
17431         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17432         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17433         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17434                                                tbl->obj, &matcher);
17435         if (ret)
17436                 goto err;
17437         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17438         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17439                                        actions, &flow);
17440 err:
17441         /*
17442          * If the batch counter with offset is not supported, the driver does
17443          * not validate the invalid offset value and flow creation succeeds.
17444          * In this case, the batch counter is not supported in the root table.
17445          *
17446          * Otherwise, if flow creation fails, the counter offset is supported.
17447          */
17448         if (flow) {
17449                 DRV_LOG(INFO, "Batch counter is not supported in root "
17450                               "table. Switch to fallback mode.");
17451                 rte_errno = ENOTSUP;
17452                 ret = -rte_errno;
17453                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17454         } else {
17455                 /* Check the matcher to be sure validation failed at flow create. */
17456                 if (!matcher || errno != EINVAL)
17457                         DRV_LOG(ERR, "Unexpected error in counter offset "
17458                                      "support detection");
17459                 ret = 0;
17460         }
17461         if (actions[0])
17462                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
17463         if (matcher)
17464                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17465         if (tbl)
17466                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17467         if (dcs)
17468                 claim_zero(mlx5_devx_cmd_destroy(dcs));
17469         return ret;
17470 }
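
/*
 * Usage sketch: the probe above returns 0 when batch counters with offset
 * work on the root table and a negative value when the driver must fall
 * back to single-counter allocation. The "fallback" flag is hypothetical.
 */
static void
example_select_counter_mode(struct rte_eth_dev *dev, bool *fallback)
{
        /* Fall back when counter offsets are rejected on the root table. */
        *fallback = mlx5_flow_dv_discover_counter_offset_support(dev) != 0;
}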
17471
17472 /**
17473  * Query a devx counter.
17474  *
17475  * @param[in] dev
17476  *   Pointer to the Ethernet device structure.
17477  * @param[in] counter
17478  *   Index to the flow counter.
17479  * @param[in] clear
17480  *   Set to clear the counter statistics.
17481  * @param[out] pkts
17482  *   The statistics value of packets.
17483  * @param[out] bytes
17484  *   The statistics value of bytes.
17485  *
17486  * @return
17487  *   0 on success, otherwise return -1.
17488  */
17489 static int
17490 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
17491                       uint64_t *pkts, uint64_t *bytes)
17492 {
17493         struct mlx5_priv *priv = dev->data->dev_private;
17494         struct mlx5_flow_counter *cnt;
17495         uint64_t inn_pkts, inn_bytes;
17496         int ret;
17497
17498         if (!priv->sh->cdev->config.devx)
17499                 return -1;
17500
17501         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
17502         if (ret)
17503                 return -1;
17504         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
17505         *pkts = inn_pkts - cnt->hits;
17506         *bytes = inn_bytes - cnt->bytes;
17507         if (clear) {
17508                 cnt->hits = inn_pkts;
17509                 cnt->bytes = inn_bytes;
17510         }
17511         return 0;
17512 }
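
/*
 * Worked example for the query above: the driver stores the raw HW reading
 * in cnt->hits/cnt->bytes when "clear" is set, so the reported values are
 * deltas. If HW has counted 1000 packets, a query with clear=true reports
 * 1000 and resets the baseline; a later query at 1300 reports 300.
 * A hypothetical wrapper:
 */
static int
example_read_counter_delta(struct rte_eth_dev *dev, uint32_t counter,
                           uint64_t *pkts, uint64_t *bytes)
{
        /* clear=true: the next query counts from zero again. */
        return flow_dv_counter_query(dev, counter, true, pkts, bytes);
}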
17513
17514 /**
17515  * Get aged-out flows.
17516  *
17517  * @param[in] dev
17518  *   Pointer to the Ethernet device structure.
17519  * @param[in] context
17520  *   The address of an array of pointers to the aged-out flows contexts.
17521  * @param[in] nb_contexts
17522  *   The length of context array pointers.
17523  * @param[out] error
17524  *   Perform verbose error reporting if not NULL. Initialized in case of
17525  *   error only.
17526  *
17527  * @return
17528  *   The number of contexts retrieved on success, otherwise a negative
17529  *   errno value.
17530  *   If nb_contexts is 0, return the total number of aged contexts.
17531  *   If nb_contexts is not 0, return the number of aged flows reported
17532  *   in the context array.
17533  */
17534 static int
17535 flow_dv_get_aged_flows(struct rte_eth_dev *dev,
17536                     void **context,
17537                     uint32_t nb_contexts,
17538                     struct rte_flow_error *error)
17539 {
17540         struct mlx5_priv *priv = dev->data->dev_private;
17541         struct mlx5_age_info *age_info;
17542         struct mlx5_age_param *age_param;
17543         struct mlx5_flow_counter *counter;
17544         struct mlx5_aso_age_action *act;
17545         int nb_flows = 0;
17546
17547         if (nb_contexts && !context)
17548                 return rte_flow_error_set(error, EINVAL,
17549                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17550                                           NULL, "empty context");
17551         age_info = GET_PORT_AGE_INFO(priv);
17552         rte_spinlock_lock(&age_info->aged_sl);
17553         LIST_FOREACH(act, &age_info->aged_aso, next) {
17554                 nb_flows++;
17555                 if (nb_contexts) {
17556                         context[nb_flows - 1] =
17557                                                 act->age_params.context;
17558                         if (!(--nb_contexts))
17559                                 break;
17560                 }
17561         }
17562         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
17563                 nb_flows++;
17564                 if (nb_contexts) {
17565                         age_param = MLX5_CNT_TO_AGE(counter);
17566                         context[nb_flows - 1] = age_param->context;
17567                         if (!(--nb_contexts))
17568                                 break;
17569                 }
17570         }
17571         rte_spinlock_unlock(&age_info->aged_sl);
17572         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
17573         return nb_flows;
17574 }
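
/*
 * Usage sketch for the handler above: the public API it backs,
 * rte_flow_get_aged_flows(), is typically called twice -- once with
 * nb_contexts == 0 to learn how many aged contexts are pending, then with
 * a buffer of that size. A sketch, assuming the caller owns the contexts:
 */
static int
example_collect_aged_contexts(uint16_t port_id, struct rte_flow_error *error)
{
        void **ctx;
        int n = rte_flow_get_aged_flows(port_id, NULL, 0, error);

        if (n <= 0)
                return n;
        ctx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(void *) * n, 0,
                          SOCKET_ID_ANY);
        if (ctx == NULL)
                return -ENOMEM;
        n = rte_flow_get_aged_flows(port_id, ctx, n, error);
        /* ... process the n aged contexts here ... */
        mlx5_free(ctx);
        return n;
}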
17575
17576 /*
17577  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
17578  */
17579 static uint32_t
17580 flow_dv_counter_allocate(struct rte_eth_dev *dev)
17581 {
17582         return flow_dv_counter_alloc(dev, 0);
17583 }
17584
17585 /**
17586  * Validate indirect action.
17587  * Dispatcher for action type specific validation.
17588  *
17589  * @param[in] dev
17590  *   Pointer to the Ethernet device structure.
17591  * @param[in] conf
17592  *   Indirect action configuration.
17593  * @param[in] action
17594  *   The indirect action object to validate.
17595  * @param[out] error
17596  *   Perform verbose error reporting if not NULL. Initialized in case of
17597  *   error only.
17598  *
17599  * @return
17600  *   0 on success, otherwise negative errno value.
17601  */
17602 static int
17603 flow_dv_action_validate(struct rte_eth_dev *dev,
17604                         const struct rte_flow_indir_action_conf *conf,
17605                         const struct rte_flow_action *action,
17606                         struct rte_flow_error *err)
17607 {
17608         struct mlx5_priv *priv = dev->data->dev_private;
17609
17610         RTE_SET_USED(conf);
17611         switch (action->type) {
17612         case RTE_FLOW_ACTION_TYPE_RSS:
17613                 /*
17614                  * priv->obj_ops is set according to driver capabilities.
17615                  * When DevX capabilities are
17616                  * sufficient, it is set to devx_obj_ops.
17617                  * Otherwise, it is set to ibv_obj_ops.
17618                  * ibv_obj_ops doesn't support ind_table_modify operation.
17619                  * In this case the indirect RSS action can't be used.
17620                  */
17621                 if (priv->obj_ops.ind_table_modify == NULL)
17622                         return rte_flow_error_set
17623                                         (err, ENOTSUP,
17624                                          RTE_FLOW_ERROR_TYPE_ACTION,
17625                                          NULL,
17626                                          "Indirect RSS action not supported");
17627                 return mlx5_validate_action_rss(dev, action, err);
17628         case RTE_FLOW_ACTION_TYPE_AGE:
17629                 if (!priv->sh->aso_age_mng)
17630                         return rte_flow_error_set(err, ENOTSUP,
17631                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17632                                                 NULL,
17633                                                 "Indirect age action not supported");
17634                 return flow_dv_validate_action_age(0, action, dev, err);
17635         case RTE_FLOW_ACTION_TYPE_COUNT:
17636                 return flow_dv_validate_action_count(dev, true, 0, err);
17637         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
17638                 if (!priv->sh->ct_aso_en)
17639                         return rte_flow_error_set(err, ENOTSUP,
17640                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17641                                         "ASO CT is not supported");
17642                 return mlx5_validate_action_ct(dev, action->conf, err);
17643         default:
17644                 return rte_flow_error_set(err, ENOTSUP,
17645                                           RTE_FLOW_ERROR_TYPE_ACTION,
17646                                           NULL,
17647                                           "action type not supported");
17648         }
17649 }
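
/*
 * Usage sketch: the dispatcher above runs when an application creates an
 * indirect action through the generic rte_flow API, e.g. an indirect
 * COUNT action on the ingress domain (values are illustrative):
 */
static struct rte_flow_action_handle *
example_create_indirect_count(uint16_t port_id, struct rte_flow_error *error)
{
        const struct rte_flow_indir_action_conf conf = {
                .ingress = 1,
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };

        /* Validation is dispatched to flow_dv_action_validate(). */
        return rte_flow_action_handle_create(port_id, &conf, &action, error);
}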
17650
17651 /*
17652  * Check if the RSS configurations for colors of a meter policy match
17653  * each other, except the queues.
17654  *
17655  * @param[in] r1
17656  *   Pointer to the first RSS flow action.
17657  * @param[in] r2
17658  *   Pointer to the second RSS flow action.
17659  *
17660  * @return
17661  *   0 on match, 1 on conflict.
17662  */
17663 static inline int
17664 flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1,
17665                                const struct rte_flow_action_rss *r2)
17666 {
17667         if (r1 == NULL || r2 == NULL)
17668                 return 0;
17669         if (!(r1->level <= 1 && r2->level <= 1) &&
17670             !(r1->level > 1 && r2->level > 1))
17671                 return 1;
17672         if (r1->types != r2->types &&
17673             !((r1->types == 0 || r1->types == RTE_ETH_RSS_IP) &&
17674               (r2->types == 0 || r2->types == RTE_ETH_RSS_IP)))
17675                 return 1;
17676         if (r1->key || r2->key) {
17677                 const void *key1 = r1->key ? r1->key : rss_hash_default_key;
17678                 const void *key2 = r2->key ? r2->key : rss_hash_default_key;
17679
17680                 if (memcmp(key1, key2, MLX5_RSS_HASH_KEY_LEN))
17681                         return 1;
17682         }
17683         return 0;
17684 }
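
/*
 * Example for the comparison above: these two configurations match (same
 * level, same types, default key) even though their queues differ, so the
 * same sub-policy layout can serve both colors. The call returns 0 here.
 */
static int
example_rss_confs_match(void)
{
        static const uint16_t q_green[] = { 0, 1 };
        static const uint16_t q_yellow[] = { 2, 3 };
        const struct rte_flow_action_rss green = {
                .types = RTE_ETH_RSS_IP,
                .queue = q_green,
                .queue_num = 2,
        };
        const struct rte_flow_action_rss yellow = {
                .types = RTE_ETH_RSS_IP,
                .queue = q_yellow,
                .queue_num = 2,
        };

        /* Queues are intentionally ignored by the compare. */
        return flow_dv_mtr_policy_rss_compare(&green, &yellow);
}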
17685
17686 /**
17687  * Validate the meter hierarchy chain for meter policy.
17688  *
17689  * @param[in] dev
17690  *   Pointer to the Ethernet device structure.
17691  * @param[in] meter_id
17692  *   Meter id.
17693  * @param[in] action_flags
17694  *   Holds the actions detected until now.
17695  * @param[out] is_rss
17696  *   Is RSS or not.
17697  * @param[out] hierarchy_domain
17698  *   The domain bitmap for hierarchy policy.
17699  * @param[out] error
17700  *   Perform verbose error reporting if not NULL. Initialized in case of
17701  *   error only.
17702  *
17703  * @return
17704  *   0 on success, otherwise negative errno value with error set.
17705  */
17706 static int
17707 flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
17708                                   uint32_t meter_id,
17709                                   uint64_t action_flags,
17710                                   bool *is_rss,
17711                                   uint8_t *hierarchy_domain,
17712                                   struct rte_mtr_error *error)
17713 {
17714         struct mlx5_priv *priv = dev->data->dev_private;
17715         struct mlx5_flow_meter_info *fm;
17716         struct mlx5_flow_meter_policy *policy;
17717         uint8_t cnt = 1;
17718
17719         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
17720                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17721                 return -rte_mtr_error_set(error, EINVAL,
17722                                         RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
17723                                         NULL,
17724                                         "Multiple fate actions not supported.");
17725         *hierarchy_domain = 0;
17726         while (true) {
17727                 fm = mlx5_flow_meter_find(priv, meter_id, NULL);
17728                 if (!fm)
17729                         return -rte_mtr_error_set(error, EINVAL,
17730                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17731                                         "Meter not found in meter hierarchy.");
17732                 if (fm->def_policy)
17733                         return -rte_mtr_error_set(error, EINVAL,
17734                                         RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17735                         "Non termination meter not supported in hierarchy.");
17736                 policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17737                 MLX5_ASSERT(policy);
17738                 /*
17739                  * Only inherit the supported domains of the first meter
17740                  * in the hierarchy.
17741                  * Each meter supports at least one domain.
17742                  */
17743                 if (!*hierarchy_domain) {
17744                         if (policy->transfer)
17745                                 *hierarchy_domain |=
17746                                                 MLX5_MTR_DOMAIN_TRANSFER_BIT;
17747                         if (policy->ingress)
17748                                 *hierarchy_domain |=
17749                                                 MLX5_MTR_DOMAIN_INGRESS_BIT;
17750                         if (policy->egress)
17751                                 *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
17752                 }
17753                 if (!policy->is_hierarchy) {
17754                         *is_rss = policy->is_rss;
17755                         break;
17756                 }
17757                 meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
17758                 if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
17759                         return -rte_mtr_error_set(error, EINVAL,
17760                                         RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
17761                                         "Exceed max hierarchy meter number.");
17762         }
17763         return 0;
17764 }
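
/*
 * Example of the policy shape the validation above accepts: a hierarchy
 * policy chains the next meter on GREEN only, provides no YELLOW actions
 * and drops RED. All IDs are illustrative.
 */
static int
example_add_hierarchy_policy(uint16_t port_id, uint32_t policy_id,
                             uint32_t next_mtr_id, struct rte_mtr_error *error)
{
        const struct rte_flow_action_meter next_meter = {
                .mtr_id = next_mtr_id,
        };
        const struct rte_flow_action green[] = {
                { .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &next_meter },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        const struct rte_flow_action red[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_mtr_meter_policy_params params = {
                .actions = {
                        [RTE_COLOR_GREEN] = green,
                        [RTE_COLOR_YELLOW] = NULL,
                        [RTE_COLOR_RED] = red,
                },
        };

        return rte_mtr_meter_policy_add(port_id, policy_id, &params, error);
}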
17765
17766 /**
17767  * Validate meter policy actions.
17768  * Dispatcher for action type specific validation.
17769  *
17770  * @param[in] dev
17771  *   Pointer to the Ethernet device structure.
17772  * @param[in] action
17773  *   The meter policy action object to validate.
17774  * @param[in] attr
17775  *   Attributes of flow to determine steering domain.
17776  * @param[out] error
17777  *   Perform verbose error reporting if not NULL. Initialized in case of
17778  *   error only.
17779  *
17780  * @return
17781  *   0 on success, otherwise negative errno value.
17782  */
17783 static int
17784 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
17785                         const struct rte_flow_action *actions[RTE_COLORS],
17786                         struct rte_flow_attr *attr,
17787                         bool *is_rss,
17788                         uint8_t *domain_bitmap,
17789                         uint8_t *policy_mode,
17790                         struct rte_mtr_error *error)
17791 {
17792         struct mlx5_priv *priv = dev->data->dev_private;
17793         struct mlx5_sh_config *dev_conf = &priv->sh->config;
17794         const struct rte_flow_action *act;
17795         uint64_t action_flags[RTE_COLORS] = {0};
17796         int actions_n;
17797         int i, ret;
17798         struct rte_flow_error flow_err;
17799         uint8_t domain_color[RTE_COLORS] = {0};
17800         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
17801         uint8_t hierarchy_domain = 0;
17802         const struct rte_flow_action_meter *mtr;
17803         bool def_green = false;
17804         bool def_yellow = false;
17805         const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};
17806
17807         if (!dev_conf->dv_esw_en)
17808                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17809         *domain_bitmap = def_domain;
17810         /* The red color supports the DROP action only. */
17811         if (!actions[RTE_COLOR_RED] ||
17812             actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
17813                 return -rte_mtr_error_set(error, ENOTSUP,
17814                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17815                                 NULL, "Red color only supports drop action.");
17816         /*
17817          * Check default policy actions:
17818          * Green / Yellow: no action, Red: drop action
17819          * Either G or Y will trigger default policy actions to be created.
17820          */
17821         if (!actions[RTE_COLOR_GREEN] ||
17822             actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)
17823                 def_green = true;
17824         if (!actions[RTE_COLOR_YELLOW] ||
17825             actions[RTE_COLOR_YELLOW]->type == RTE_FLOW_ACTION_TYPE_END)
17826                 def_yellow = true;
17827         if (def_green && def_yellow) {
17828                 *policy_mode = MLX5_MTR_POLICY_MODE_DEF;
17829                 return 0;
17830         } else if (!def_green && def_yellow) {
17831                 *policy_mode = MLX5_MTR_POLICY_MODE_OG;
17832         } else if (def_green && !def_yellow) {
17833                 *policy_mode = MLX5_MTR_POLICY_MODE_OY;
17834         } else {
17835                 *policy_mode = MLX5_MTR_POLICY_MODE_ALL;
17836         }
17837         /* Initialize to an empty string to avoid a NULL pointer dereference. */
17838         flow_err.message = "";
17839         for (i = 0; i < RTE_COLORS; i++) {
17840                 act = actions[i];
17841                 for (action_flags[i] = 0, actions_n = 0;
17842                      act && act->type != RTE_FLOW_ACTION_TYPE_END;
17843                      act++) {
17844                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
17845                                 return -rte_mtr_error_set(error, ENOTSUP,
17846                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17847                                           NULL, "too many actions");
17848                         switch (act->type) {
17849                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
17850                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
17851                                 if (!dev_conf->dv_esw_en)
17852                                         return -rte_mtr_error_set(error,
17853                                         ENOTSUP,
17854                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17855                                         NULL, "PORT action validate check"
17856                                         " fail for ESW disable");
17857                                 ret = flow_dv_validate_action_port_id(dev,
17858                                                 action_flags[i],
17859                                                 act, attr, &flow_err);
17860                                 if (ret)
17861                                         return -rte_mtr_error_set(error,
17862                                         ENOTSUP,
17863                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17864                                         NULL, flow_err.message ?
17865                                         flow_err.message :
17866                                         "PORT action validate check fail");
17867                                 ++actions_n;
17868                                 action_flags[i] |= MLX5_FLOW_ACTION_PORT_ID;
17869                                 break;
17870                         case RTE_FLOW_ACTION_TYPE_MARK:
17871                                 ret = flow_dv_validate_action_mark(dev, act,
17872                                                            action_flags[i],
17873                                                            attr, &flow_err);
17874                                 if (ret < 0)
17875                                         return -rte_mtr_error_set(error,
17876                                         ENOTSUP,
17877                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17878                                         NULL, flow_err.message ?
17879                                         flow_err.message :
17880                                         "Mark action validate check fail");
17881                                 if (dev_conf->dv_xmeta_en !=
17882                                         MLX5_XMETA_MODE_LEGACY)
17883                                         return -rte_mtr_error_set(error,
17884                                         ENOTSUP,
17885                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17886                                         NULL, "Extend MARK action is "
17887                                         "not supported. Please try use "
17888                                         "default policy for meter.");
17889                                 action_flags[i] |= MLX5_FLOW_ACTION_MARK;
17890                                 ++actions_n;
17891                                 break;
17892                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
17893                                 ret = flow_dv_validate_action_set_tag(dev,
17894                                                         act, action_flags[i],
17895                                                         attr, &flow_err);
17896                                 if (ret)
17897                                         return -rte_mtr_error_set(error,
17898                                         ENOTSUP,
17899                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17900                                         NULL, flow_err.message ?
17901                                         flow_err.message :
17902                                         "Set tag action validate check fail");
17903                                 action_flags[i] |= MLX5_FLOW_ACTION_SET_TAG;
17904                                 ++actions_n;
17905                                 break;
17906                         case RTE_FLOW_ACTION_TYPE_DROP:
17907                                 ret = mlx5_flow_validate_action_drop
17908                                         (action_flags[i], attr, &flow_err);
17909                                 if (ret < 0)
17910                                         return -rte_mtr_error_set(error,
17911                                         ENOTSUP,
17912                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17913                                         NULL, flow_err.message ?
17914                                         flow_err.message :
17915                                         "Drop action validate check fail");
17916                                 action_flags[i] |= MLX5_FLOW_ACTION_DROP;
17917                                 ++actions_n;
17918                                 break;
17919                         case RTE_FLOW_ACTION_TYPE_QUEUE:
17920                                 /*
17921                                  * Check whether extensive
17922                                  * metadata feature is engaged.
17923                                  */
17924                                 if (dev_conf->dv_flow_en &&
17925                                     (dev_conf->dv_xmeta_en !=
17926                                      MLX5_XMETA_MODE_LEGACY) &&
17927                                     mlx5_flow_ext_mreg_supported(dev))
17928                                         return -rte_mtr_error_set(error,
17929                                           ENOTSUP,
17930                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17931                                           NULL, "Queue action with meta "
17932                                           "is not supported. Please try use "
17933                                           "default policy for meter.");
17934                                 ret = mlx5_flow_validate_action_queue(act,
17935                                                         action_flags[i], dev,
17936                                                         attr, &flow_err);
17937                                 if (ret < 0)
17938                                         return -rte_mtr_error_set(error,
17939                                           ENOTSUP,
17940                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17941                                           NULL, flow_err.message ?
17942                                           flow_err.message :
17943                                           "Queue action validate check fail");
17944                                 action_flags[i] |= MLX5_FLOW_ACTION_QUEUE;
17945                                 ++actions_n;
17946                                 break;
17947                         case RTE_FLOW_ACTION_TYPE_RSS:
17948                                 if (dev_conf->dv_flow_en &&
17949                                     (dev_conf->dv_xmeta_en !=
17950                                      MLX5_XMETA_MODE_LEGACY) &&
17951                                     mlx5_flow_ext_mreg_supported(dev))
17952                                         return -rte_mtr_error_set(error,
17953                                           ENOTSUP,
17954                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17955                                           NULL, "RSS action with meta "
17956                                           "is not supported. Please try use "
17957                                           "default policy for meter.");
17958                                 ret = mlx5_validate_action_rss(dev, act,
17959                                                                &flow_err);
17960                                 if (ret < 0)
17961                                         return -rte_mtr_error_set(error,
17962                                           ENOTSUP,
17963                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17964                                           NULL, flow_err.message ?
17965                                           flow_err.message :
17966                                           "RSS action validate check fail");
17967                                 action_flags[i] |= MLX5_FLOW_ACTION_RSS;
17968                                 ++actions_n;
17969                                 /* Either G or Y will set the RSS. */
17970                                 rss_color[i] = act->conf;
17971                                 break;
17972                         case RTE_FLOW_ACTION_TYPE_JUMP:
17973                                 ret = flow_dv_validate_action_jump(dev,
17974                                         NULL, act, action_flags[i],
17975                                         attr, true, &flow_err);
17976                                 if (ret)
17977                                         return -rte_mtr_error_set(error,
17978                                           ENOTSUP,
17979                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17980                                           NULL, flow_err.message ?
17981                                           flow_err.message :
17982                                           "Jump action validate check fail");
17983                                 ++actions_n;
17984                                 action_flags[i] |= MLX5_FLOW_ACTION_JUMP;
17985                                 break;
17986                         /*
17987                          * Only the last meter in the hierarchy supports
17988                          * YELLOW color steering, so a policy that chains
17989                          * another meter must not provide a YELLOW policy.
17990                          */
17991                         case RTE_FLOW_ACTION_TYPE_METER:
17992                                 if (i != RTE_COLOR_GREEN)
17993                                         return -rte_mtr_error_set(error,
17994                                                 ENOTSUP,
17995                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17996                                                 NULL,
17997                                                 "Meter hierarchy only supports GREEN color.");
17998                                 if (*policy_mode != MLX5_MTR_POLICY_MODE_OG)
17999                                         return -rte_mtr_error_set(error,
18000                                                 ENOTSUP,
18001                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18002                                                 NULL,
18003                                                 "No yellow policy should be provided in meter hierarchy.");
18004                                 mtr = act->conf;
18005                                 ret = flow_dv_validate_policy_mtr_hierarchy(dev,
18006                                                         mtr->mtr_id,
18007                                                         action_flags[i],
18008                                                         is_rss,
18009                                                         &hierarchy_domain,
18010                                                         error);
18011                                 if (ret)
18012                                         return ret;
18013                                 ++actions_n;
18014                                 action_flags[i] |=
18015                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
18016                                 break;
18017                         default:
18018                                 return -rte_mtr_error_set(error, ENOTSUP,
18019                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18020                                         NULL,
18021                                         "Doesn't support optional action");
18022                         }
18023                 }
18024                 if (action_flags[i] & MLX5_FLOW_ACTION_PORT_ID) {
18025                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
18026                 } else if ((action_flags[i] &
18027                           (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
18028                           (action_flags[i] & MLX5_FLOW_ACTION_MARK)) {
18029                         /*
18030                          * Only support MLX5_XMETA_MODE_LEGACY
18031                          * so MARK action is only in ingress domain.
18032                          */
18033                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
18034                 } else {
18035                         domain_color[i] = def_domain;
18036                         if (action_flags[i] &&
18037                             !(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18038                                 domain_color[i] &=
18039                                 ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
18040                 }
18041                 if (action_flags[i] &
18042                     MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
18043                         domain_color[i] &= hierarchy_domain;
18044                 /*
18045                  * Non-termination actions only support NIC Tx domain.
18046                  * The adjustment should be skipped when there is no
18047                  * action or only END is provided. The default domains
18048                  * bit-mask is set to find the MIN intersection.
18049                  * The action flags checking should also be skipped.
18050                  */
18051                 if ((def_green && i == RTE_COLOR_GREEN) ||
18052                     (def_yellow && i == RTE_COLOR_YELLOW))
18053                         continue;
18054                 /*
18055                  * Validate the drop action mutual exclusion
18056                  * with other actions. Drop action is mutually-exclusive
18057                  * with any other action, except for Count action.
18058                  */
18059                 if ((action_flags[i] & MLX5_FLOW_ACTION_DROP) &&
18060                     (action_flags[i] & ~MLX5_FLOW_ACTION_DROP)) {
18061                         return -rte_mtr_error_set(error, ENOTSUP,
18062                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18063                                 NULL, "Drop action is mutually-exclusive "
18064                                 "with any other action");
18065                 }
18066                 /* E-Switch has a few restrictions on using items and actions. */
18067                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
18068                         if (!mlx5_flow_ext_mreg_supported(dev) &&
18069                             action_flags[i] & MLX5_FLOW_ACTION_MARK)
18070                                 return -rte_mtr_error_set(error, ENOTSUP,
18071                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18072                                         NULL, "unsupported action MARK");
18073                         if (action_flags[i] & MLX5_FLOW_ACTION_QUEUE)
18074                                 return -rte_mtr_error_set(error, ENOTSUP,
18075                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18076                                         NULL, "unsupported action QUEUE");
18077                         if (action_flags[i] & MLX5_FLOW_ACTION_RSS)
18078                                 return -rte_mtr_error_set(error, ENOTSUP,
18079                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18080                                         NULL, "unsupported action RSS");
18081                         if (!(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18082                                 return -rte_mtr_error_set(error, ENOTSUP,
18083                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18084                                         NULL, "no fate action is found");
18085                 } else {
18086                         if (!(action_flags[i] & MLX5_FLOW_FATE_ACTIONS) &&
18087                             (domain_color[i] & MLX5_MTR_DOMAIN_INGRESS_BIT)) {
18088                                 if ((domain_color[i] &
18089                                      MLX5_MTR_DOMAIN_EGRESS_BIT))
18090                                         domain_color[i] =
18091                                                 MLX5_MTR_DOMAIN_EGRESS_BIT;
18092                                 else
18093                                         return -rte_mtr_error_set(error,
18094                                                 ENOTSUP,
18095                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18096                                                 NULL,
18097                                                 "no fate action is found");
18098                         }
18099                 }
18100         }
18101         /* If both colors have RSS, the attributes should be the same. */
18102         if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN],
18103                                            rss_color[RTE_COLOR_YELLOW]))
18104                 return -rte_mtr_error_set(error, EINVAL,
18105                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18106                                           NULL, "policy RSS attr conflict");
18107         if (rss_color[RTE_COLOR_GREEN] || rss_color[RTE_COLOR_YELLOW])
18108                 *is_rss = true;
18109         /* "domain_color[C]" is non-zero for each color, default is ALL. */
18110         if (!def_green && !def_yellow &&
18111             domain_color[RTE_COLOR_GREEN] != domain_color[RTE_COLOR_YELLOW] &&
18112             !(action_flags[RTE_COLOR_GREEN] & MLX5_FLOW_ACTION_DROP) &&
18113             !(action_flags[RTE_COLOR_YELLOW] & MLX5_FLOW_ACTION_DROP))
18114                 return -rte_mtr_error_set(error, EINVAL,
18115                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18116                                           NULL, "policy domains conflict");
18117         /*
18118          * At least one color policy is listed in the actions, so the
18119          * supported domains are the intersection of both colors' domains.
18120          */
18121         *domain_bitmap = domain_color[RTE_COLOR_GREEN] &
18122                          domain_color[RTE_COLOR_YELLOW];
18123         return 0;
18124 }
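
/*
 * Example of a policy that passes the validation above with both colors
 * terminated by RSS: the two confs must agree on everything except the
 * queues (see flow_dv_mtr_policy_rss_compare()) and RED must drop.
 * Values are illustrative.
 */
static int
example_add_rss_policy(uint16_t port_id, uint32_t policy_id,
                       const struct rte_flow_action_rss *g_rss,
                       const struct rte_flow_action_rss *y_rss,
                       struct rte_mtr_error *error)
{
        const struct rte_flow_action green[] = {
                { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = g_rss },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        const struct rte_flow_action yellow[] = {
                { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = y_rss },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        const struct rte_flow_action red[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_mtr_meter_policy_params params = {
                .actions = {
                        [RTE_COLOR_GREEN] = green,
                        [RTE_COLOR_YELLOW] = yellow,
                        [RTE_COLOR_RED] = red,
                },
        };

        return rte_mtr_meter_policy_add(port_id, policy_id, &params, error);
}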
18125
18126 static int
18127 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
18128 {
18129         struct mlx5_priv *priv = dev->data->dev_private;
18130         int ret = 0;
18131
18132         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
18133                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
18134                                                 flags);
18135                 if (ret != 0)
18136                         return ret;
18137         }
18138         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
18139                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
18140                 if (ret != 0)
18141                         return ret;
18142         }
18143         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
18144                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
18145                 if (ret != 0)
18146                         return ret;
18147         }
18148         return 0;
18149 }
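
/*
 * Usage sketch: this handler backs the public rte_pmd_mlx5_sync_flow()
 * helper, which lets an application wait until previously created rules
 * are fully committed to HW for the selected domains:
 */
static int
example_sync_rx_rules(uint16_t port_id)
{
        /* Sync only the NIC RX domain; OR in more domain bits as needed. */
        return rte_pmd_mlx5_sync_flow(port_id, MLX5_DOMAIN_BIT_NIC_RX);
}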
18150
18151 /**
18152  * Discover the number of available flow priorities
18153  * by trying to create a flow with the highest priority value
18154  * for each possible number.
18155  *
18156  * @param[in] dev
18157  *   Ethernet device.
18158  * @param[in] vprio
18159  *   List of possible number of available priorities.
18160  * @param[in] vprio_n
18161  *   Size of @p vprio array.
18162  * @return
18163  *   On success, number of available flow priorities.
18164  *   On failure, a negative errno-style code and rte_errno is set.
18165  */
18166 static int
18167 flow_dv_discover_priorities(struct rte_eth_dev *dev,
18168                             const uint16_t *vprio, int vprio_n)
18169 {
18170         struct mlx5_priv *priv = dev->data->dev_private;
18171         struct mlx5_indexed_pool *pool = priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW];
18172         struct rte_flow_item_eth eth;
18173         struct rte_flow_item item = {
18174                 .type = RTE_FLOW_ITEM_TYPE_ETH,
18175                 .spec = &eth,
18176                 .mask = &eth,
18177         };
18178         struct mlx5_flow_dv_matcher matcher = {
18179                 .mask = {
18180                         .size = sizeof(matcher.mask.buf),
18181                 },
18182         };
18183         union mlx5_flow_tbl_key tbl_key;
18184         struct mlx5_flow flow;
18185         void *action;
18186         struct rte_flow_error error;
18187         uint8_t misc_mask;
18188         int i, err, ret = -ENOTSUP;
18189
18190         /*
18191          * Prepare a flow with a catch-all pattern and a drop action.
18192          * Use drop queue, because shared drop action may be unavailable.
18193          */
18194         action = priv->drop_queue.hrxq->action;
18195         if (action == NULL) {
18196                 DRV_LOG(ERR, "Priority discovery requires a drop action");
18197                 rte_errno = ENOTSUP;
18198                 return -rte_errno;
18199         }
18200         memset(&flow, 0, sizeof(flow));
18201         flow.handle = mlx5_ipool_zmalloc(pool, &flow.handle_idx);
18202         if (flow.handle == NULL) {
18203                 DRV_LOG(ERR, "Cannot create flow handle");
18204                 rte_errno = ENOMEM;
18205                 return -rte_errno;
18206         }
18207         flow.ingress = true;
18208         flow.dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
18209         flow.dv.actions[0] = action;
18210         flow.dv.actions_n = 1;
18211         memset(&eth, 0, sizeof(eth));
18212         flow_dv_translate_item_eth(matcher.mask.buf, flow.dv.value.buf,
18213                                    &item, /* inner */ false, /* group */ 0);
18214         matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);
18215         for (i = 0; i < vprio_n; i++) {
18216                 /* Configure the next proposed maximum priority. */
18217                 matcher.priority = vprio[i] - 1;
18218                 memset(&tbl_key, 0, sizeof(tbl_key));
18219                 err = flow_dv_matcher_register(dev, &matcher, &tbl_key, &flow,
18220                                                /* tunnel */ NULL,
18221                                                /* group */ 0,
18222                                                &error);
18223                 if (err != 0) {
18224                         /* This action is pure SW and must always succeed. */
18225                         DRV_LOG(ERR, "Cannot register matcher");
18226                         ret = -rte_errno;
18227                         break;
18228                 }
18229                 /* Try to apply the flow to HW. */
18230                 misc_mask = flow_dv_matcher_enable(flow.dv.value.buf);
18231                 __flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask);
18232                 err = mlx5_flow_os_create_flow
18233                                 (flow.handle->dvh.matcher->matcher_object,
18234                                  (void *)&flow.dv.value, flow.dv.actions_n,
18235                                  flow.dv.actions, &flow.handle->drv_flow);
18236                 if (err == 0) {
18237                         claim_zero(mlx5_flow_os_destroy_flow
18238                                                 (flow.handle->drv_flow));
18239                         flow.handle->drv_flow = NULL;
18240                 }
18241                 claim_zero(flow_dv_matcher_release(dev, flow.handle));
18242                 if (err != 0)
18243                         break;
18244                 ret = vprio[i];
18245         }
18246         mlx5_ipool_free(pool, flow.handle_idx);
18247         /* Set rte_errno if no expected priority value matched. */
18248         if (ret < 0)
18249                 rte_errno = -ret;
18250         return ret;
18251 }
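
/*
 * Usage sketch for the discovery above: the caller walks an ascending list
 * of candidate priority counts and keeps the highest one the HW accepts.
 * The candidate list below is illustrative.
 */
static int
example_probe_priorities(struct rte_eth_dev *dev)
{
        static const uint16_t vprio[] = { 8, 16 };

        return flow_dv_discover_priorities(dev, vprio, RTE_DIM(vprio));
}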
18252
18253 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
18254         .validate = flow_dv_validate,
18255         .prepare = flow_dv_prepare,
18256         .translate = flow_dv_translate,
18257         .apply = flow_dv_apply,
18258         .remove = flow_dv_remove,
18259         .destroy = flow_dv_destroy,
18260         .query = flow_dv_query,
18261         .create_mtr_tbls = flow_dv_create_mtr_tbls,
18262         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
18263         .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
18264         .create_meter = flow_dv_mtr_alloc,
18265         .free_meter = flow_dv_aso_mtr_release_to_pool,
18266         .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
18267         .create_mtr_acts = flow_dv_create_mtr_policy_acts,
18268         .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
18269         .create_policy_rules = flow_dv_create_policy_rules,
18270         .destroy_policy_rules = flow_dv_destroy_policy_rules,
18271         .create_def_policy = flow_dv_create_def_policy,
18272         .destroy_def_policy = flow_dv_destroy_def_policy,
18273         .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
18274         .meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
18275         .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
18276         .counter_alloc = flow_dv_counter_allocate,
18277         .counter_free = flow_dv_counter_free,
18278         .counter_query = flow_dv_counter_query,
18279         .get_aged_flows = flow_dv_get_aged_flows,
18280         .action_validate = flow_dv_action_validate,
18281         .action_create = flow_dv_action_create,
18282         .action_destroy = flow_dv_action_destroy,
18283         .action_update = flow_dv_action_update,
18284         .action_query = flow_dv_action_query,
18285         .sync_domain = flow_dv_sync_domain,
18286         .discover_priorities = flow_dv_discover_priorities,
18287         .item_create = flow_dv_item_create,
18288         .item_release = flow_dv_item_release,
18289 };
18290
18291 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
18292