/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>
#include <rte_tailq.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
	struct {
		uint32_t valid:1;
		uint32_t ipv4:1;
		uint32_t ipv6:1;
		uint32_t tcp:1;
		uint32_t udp:1;
		uint32_t reserved:27;
	};
	uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
			     struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
				     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
					uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
				  uint32_t rix_jump);

static int16_t
flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->pci_dev == NULL)
		return 0;
	switch (priv->pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
		return (int16_t)0xfffe;
	default:
		return 0;
	}
}
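
/*
 * Note: 0xfffe is, as far as we can tell, the E-Switch manager vport
 * number of the embedded CPU function (ECPF) on BlueField devices;
 * on other NICs the E-Switch manager vport is simply 0.
 */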

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() rejects multiple L3/L4 layers in all cases other than
 * tunnel mode. For tunnel mode, the items to be modified are the outermost
 * ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
		  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
	uint64_t layers = dev_flow->handle->layers;

	/*
	 * If layers is already initialized, this dev_flow is the suffix flow
	 * and the layer flags were set by the prefix flow. Use the layer
	 * flags from the prefix flow, as the suffix flow may not carry the
	 * user-defined items after the flow is split.
	 */
	if (layers) {
		if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
			attr->ipv4 = 1;
		else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
			attr->ipv6 = 1;
		if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
			attr->tcp = 1;
		else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
			attr->udp = 1;
		attr->valid = 1;
		return;
	}
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		uint8_t next_protocol = 0xff;
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_MPLS:
			if (tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			if (!attr->ipv6)
				attr->ipv4 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			    item->mask)->hdr.next_proto_id)
				next_protocol =
				    ((const struct rte_flow_item_ipv4 *)
				      (item->spec))->hdr.next_proto_id &
				    ((const struct rte_flow_item_ipv4 *)
				      (item->mask))->hdr.next_proto_id;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			if (!attr->ipv4)
				attr->ipv6 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			    item->mask)->hdr.proto)
				next_protocol =
				    ((const struct rte_flow_item_ipv6 *)
				      (item->spec))->hdr.proto &
				    ((const struct rte_flow_item_ipv6 *)
				      (item->mask))->hdr.proto;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			if (!attr->tcp)
				attr->udp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			if (!attr->udp)
				attr->tcp = 1;
			break;
		default:
			break;
		}
	}
	attr->valid = 1;
}
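
/*
 * Worked example: for the pattern ETH / IPV4 / UDP / VXLAN / ETH / IPV6 /
 * TCP with tunnel_decap == true, the outer IPV4 and UDP items first set
 * attr->ipv4 and attr->udp, the VXLAN item then clears attr->attr, and
 * the inner IPV6 and TCP items leave the function with attr->ipv6 == 1
 * and attr->tcp == 1, i.e. the headers that are outermost after decap.
 */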

/*
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static inline int
rte_col_2_mlx5_col(enum rte_color rcol)
{
	switch (rcol) {
	case RTE_COLOR_GREEN:
		return MLX5_FLOW_COLOR_GREEN;
	case RTE_COLOR_YELLOW:
		return MLX5_FLOW_COLOR_YELLOW;
	case RTE_COLOR_RED:
		return MLX5_FLOW_COLOR_RED;
	default:
		break;
	}
	return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
	{1,  1, MLX5_MODI_OUT_IP_DSCP},
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
	{1,  0, MLX5_MODI_OUT_IP_DSCP},
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
			  uint8_t next_protocol, uint64_t *item_flags,
			  int *tunnel)
{
	MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
		    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	if (next_protocol == IPPROTO_IPIP) {
		*item_flags |= MLX5_FLOW_LAYER_IPIP;
		*tunnel = 1;
	}
	if (next_protocol == IPPROTO_IPV6) {
		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
		*tunnel = 1;
	}
}

static inline struct mlx5_hlist *
flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
		     const char *name, uint32_t size, bool direct_key,
		     bool lcores_share, void *ctx,
		     mlx5_list_create_cb cb_create,
		     mlx5_list_match_cb cb_match,
		     mlx5_list_remove_cb cb_remove,
		     mlx5_list_clone_cb cb_clone,
		     mlx5_list_clone_free_cb cb_clone_free)
{
	struct mlx5_hlist *hl;
	struct mlx5_hlist *expected = NULL;
	char s[MLX5_NAME_SIZE];

	hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
	if (likely(hl))
		return hl;
	snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
	hl = mlx5_hlist_create(s, size, direct_key, lcores_share,
			ctx, cb_create, cb_match, cb_remove, cb_clone,
			cb_clone_free);
	if (!hl) {
		DRV_LOG(ERR, "%s hash creation failed", name);
		rte_errno = ENOMEM;
		return NULL;
	}
	if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
					 __ATOMIC_SEQ_CST,
					 __ATOMIC_SEQ_CST)) {
		mlx5_hlist_destroy(hl);
		hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
	}
	return hl;
}
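
/*
 * The function above is a lock-free lazy initialization: several lcores
 * may race to create the hash list, the compare-and-swap publishes
 * exactly one instance, and each loser destroys its own copy and
 * reloads the winner's pointer, so callers always share one list.
 */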

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
			 struct rte_vlan_hdr *vlan)
{
	uint16_t vlan_tci;
	if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
		vlan_tci =
		    ((const struct rte_flow_action_of_set_vlan_pcp *)
					       action->conf)->vlan_pcp;
		vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
		vlan->vlan_tci |= vlan_tci;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
		vlan->vlan_tci |= rte_be_to_cpu_16
		    (((const struct rte_flow_action_of_set_vlan_vid *)
					     action->conf)->vlan_vid);
	}
}
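
/*
 * Worked example: with vlan_pcp == 5 and vlan_vid == 0x123 the two
 * branches above produce vlan_tci = (5 << 13) | 0x123 = 0xa123,
 * i.e. the PCP occupies bits 15:13 and the VID the low 12 bits.
 */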

/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   converted field in host endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
	uint32_t ret;

	switch (size) {
	case 1:
		ret = *data;
		break;
	case 2:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		break;
	case 3:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		ret = (ret << 8) | *(data + sizeof(uint16_t));
		break;
	case 4:
		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
		break;
	default:
		MLX5_ASSERT(false);
		ret = 0;
		break;
	}
	return ret;
}
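
/*
 * Worked example: for data = {0x12, 0x34, 0x56} and size == 3, the
 * 16-bit load yields 0x1234 and (0x1234 << 8) | 0x56 returns 0x123456,
 * the big-endian 24-bit field as a host-endian integer.
 */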

/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	uint32_t carry_b = 0;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented in big-endian format as well.
	 * Mask must always be present, it defines the actual field width.
	 */
	MLX5_ASSERT(item->mask);
	MLX5_ASSERT(field->size);
	do {
		uint32_t size_b;
		uint32_t off_b;
		uint32_t mask;
		uint32_t data;
		bool next_field = true;
		bool next_dcopy = true;

		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		if (!mask) {
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask) + carry_b;
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		MLX5_ASSERT(size_b);
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			.length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
				0 : size_b,
		};
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			MLX5_ASSERT(dcopy);
			actions[i].dst_field = dcopy->id;
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
			/*
			 * Destination field overflow. Copy leftovers of
			 * a source field to the next destination field.
			 */
			carry_b = 0;
			if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
			    dcopy->size != 0) {
				actions[i].length =
					dcopy->size * CHAR_BIT - dcopy->offset;
				carry_b = actions[i].length;
				next_field = false;
			}
			/*
			 * Not enough bits in a source field to fill a
			 * destination field. Switch to the next source.
			 */
			if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
			    (size_b == field->size * CHAR_BIT - off_b)) {
				actions[i].length =
					field->size * CHAR_BIT - off_b;
				dcopy->offset += actions[i].length;
				next_dcopy = false;
			}
			if (next_dcopy)
				++dcopy;
		} else {
			MLX5_ASSERT(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (next_field)
			++field;
		++i;
	} while (field->size);
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}
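
/*
 * Worked example: a set-TTL action builds an IPv4 item whose mask has
 * hdr.time_to_live == 0xFF. With the modify_ipv4 entry
 * {1, 8, MLX5_MODI_OUT_IPV4_TTL} the fetched mask is 0xFF, hence
 * off_b = rte_bsf32(0xFF) = 0 and size_b = 32 - 0 - __builtin_clz(0xFF)
 * = 8, producing one SET command of length 8 at bit offset 0.
 */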

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv4 *conf =
		(const struct rte_flow_action_set_ipv4 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
		ipv4.hdr.src_addr = conf->ipv4_addr;
		ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
	} else {
		ipv4.hdr.dst_addr = conf->ipv4_addr;
		ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
	}
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
	} else {
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	}
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;

	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
	} else {
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	}
	item.spec = &eth;
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "too many items to modify");
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = field->id,
		.length = field->size,
		.offset = field->offset,
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
	resource->actions_num = ++i;
	return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->udp) {
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	} else {
		MLX5_ASSERT(attr->tcp);
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = conf->ttl_value;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = conf->ttl_value;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
		/*
		 * The HW has no decrement operation, only increment.
		 * To simulate subtracting X from Y using the increment
		 * operation, we add UINT32_MAX to Y X times.
		 * Each addition of UINT32_MAX decrements Y by 1.
		 */
		value *= UINT32_MAX;
	tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}
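
/*
 * Worked example: to decrement the sequence number by 3, value becomes
 * 3 * 0xFFFFFFFF = 0x2FFFFFFFD, which truncates to 0xFFFFFFFD in 32 bits.
 * Since 0xFFFFFFFD == -3 modulo 2^32, the HW addition yields seq - 3.
 */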

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
		/*
		 * The HW has no decrement operation, only increment.
		 * To simulate subtracting X from Y using the increment
		 * operation, we add UINT32_MAX to Y X times.
		 * Each addition of UINT32_MAX decrements Y by 1.
		 */
		value *= UINT32_MAX;
	tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
	struct mlx5_modification_cmd *actions = resource->actions;
	uint32_t i = resource->actions_num;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many items to modify");
	MLX5_ASSERT(conf->id != REG_NON);
	MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = reg_to_field[conf->id],
		.offset = conf->offset,
		.length = conf->length,
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = rte_cpu_to_be_32(conf->data);
	++i;
	resource->actions_num = i;
	return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action_set_tag *conf,
			 struct rte_flow_error *error)
{
	rte_be32_t data = rte_cpu_to_be_32(conf->data);
	rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	enum mlx5_modification_field reg_type;
	int ret;

	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret != REG_NON);
	MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
	reg_type = reg_to_field[ret];
	MLX5_ASSERT(reg_type > 0);
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			mask = rte_cpu_to_be_32(reg_c0 >> reg_dst.offset);
		} else {
			reg_dst.offset = 0;
			mask = rte_cpu_to_be_32(reg_c0);
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}
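
/*
 * Worked example: with a reported dv_regc0_mask of 0xffff0000 and
 * conf->dst == REG_C_0, reg_dst.offset = rte_bsf32(0xffff0000) = 16 and
 * the copy mask becomes 0xffff0000 >> 16 = 0x0000ffff, so only the
 * 16 usable bits of reg_c[0] are written and the bits reserved for
 * other purposes are preserved.
 */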

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 struct rte_flow_error *error)
{
	int reg =
		mlx5_flow_get_reg_id(dev, attr->transfer ?
					  MLX5_METADATA_FDB :
					    attr->egress ?
					    MLX5_METADATA_TX :
					    MLX5_METADATA_RX, 0, error);
	if (reg < 0)
		return rte_flow_error_set(error,
					  ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL, "unavailable "
					  "metadata register");
	return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t mask = rte_cpu_to_be_32(conf->mask);
	uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg != REG_NON);
	if (reg == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4.hdr.type_of_service = conf->dscp;
	ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

1344 /**
1345  * Convert modify-header set IPv6 DSCP action to DV specification.
1346  *
1347  * @param[in,out] resource
1348  *   Pointer to the modify-header resource.
1349  * @param[in] action
1350  *   Pointer to action specification.
1351  * @param[out] error
1352  *   Pointer to the error structure.
1353  *
1354  * @return
1355  *   0 on success, a negative errno value otherwise and rte_errno is set.
1356  */
1357 static int
1358 flow_dv_convert_action_modify_ipv6_dscp
1359                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1360                          const struct rte_flow_action *action,
1361                          struct rte_flow_error *error)
1362 {
1363         const struct rte_flow_action_set_dscp *conf =
1364                 (const struct rte_flow_action_set_dscp *)(action->conf);
1365         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1366         struct rte_flow_item_ipv6 ipv6;
1367         struct rte_flow_item_ipv6 ipv6_mask;
1368
1369         memset(&ipv6, 0, sizeof(ipv6));
1370         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bits offset in IPv6 is not byte aligned,
         * rdma-core only accepts DSCP values byte aligned in bits 0 to 5,
         * to be compatible with IPv4. There is no need to shift the bits
         * in the IPv6 case since rdma-core requires the byte-aligned value.
         */
        ipv6.hdr.vtc_flow = conf->dscp;
        ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
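
/*
 * Worked note (illustrative): both conversions above reduce the header DSCP
 * mask to the same byte-aligned 6-bit value that rdma-core expects, since
 * RTE_IPV4_HDR_DSCP_MASK is 0xfc (upper 6 bits of ToS) and
 * RTE_IPV6_HDR_DSCP_MASK is 0x0fc00000 (DSCP inside vtc_flow):
 *
 *   0xfc       >> 2  == 0x3f
 *   0x0fc00000 >> 22 == 0x3f
 */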

static int
mlx5_flow_item_field_width(struct rte_eth_dev *dev,
                           enum rte_flow_field_id field, int inherit,
                           const struct rte_flow_attr *attr,
                           struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        switch (field) {
        case RTE_FLOW_FIELD_START:
                return 32;
        case RTE_FLOW_FIELD_MAC_DST:
        case RTE_FLOW_FIELD_MAC_SRC:
                return 48;
        case RTE_FLOW_FIELD_VLAN_TYPE:
                return 16;
        case RTE_FLOW_FIELD_VLAN_ID:
                return 12;
        case RTE_FLOW_FIELD_MAC_TYPE:
                return 16;
        case RTE_FLOW_FIELD_IPV4_DSCP:
                return 6;
        case RTE_FLOW_FIELD_IPV4_TTL:
                return 8;
        case RTE_FLOW_FIELD_IPV4_SRC:
        case RTE_FLOW_FIELD_IPV4_DST:
                return 32;
        case RTE_FLOW_FIELD_IPV6_DSCP:
                return 6;
        case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
                return 8;
        case RTE_FLOW_FIELD_IPV6_SRC:
        case RTE_FLOW_FIELD_IPV6_DST:
                return 128;
        case RTE_FLOW_FIELD_TCP_PORT_SRC:
        case RTE_FLOW_FIELD_TCP_PORT_DST:
                return 16;
        case RTE_FLOW_FIELD_TCP_SEQ_NUM:
        case RTE_FLOW_FIELD_TCP_ACK_NUM:
                return 32;
        case RTE_FLOW_FIELD_TCP_FLAGS:
                return 9;
        case RTE_FLOW_FIELD_UDP_PORT_SRC:
        case RTE_FLOW_FIELD_UDP_PORT_DST:
                return 16;
        case RTE_FLOW_FIELD_VXLAN_VNI:
        case RTE_FLOW_FIELD_GENEVE_VNI:
                return 24;
        case RTE_FLOW_FIELD_GTP_TEID:
        case RTE_FLOW_FIELD_TAG:
                return 32;
        case RTE_FLOW_FIELD_MARK:
                return __builtin_popcount(priv->sh->dv_mark_mask);
        case RTE_FLOW_FIELD_META:
                return (flow_dv_get_metadata_reg(dev, attr, error) == REG_C_0) ?
                        __builtin_popcount(priv->sh->dv_meta_mask) : 32;
        case RTE_FLOW_FIELD_POINTER:
        case RTE_FLOW_FIELD_VALUE:
                return inherit < 0 ? 0 : inherit;
        default:
                MLX5_ASSERT(false);
        }
        return 0;
}
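
/*
 * Illustrative sketch (hypothetical helper, not used by the driver): one
 * way a caller could turn the bit width returned above into a host-order
 * mask, e.g. when checking that a modify_field request fits in the field.
 */
static __rte_unused uint64_t
flow_dv_sketch_width_to_mask(int width)
{
        /* Widths of 64 bits or more saturate to an all-ones mask. */
        if (width >= 64)
                return UINT64_MAX;
        /* Otherwise keep only the low 'width' bits. */
        return (UINT64_C(1) << width) - 1;
}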

static void
mlx5_flow_field_id_to_modify_info
                (const struct rte_flow_action_modify_data *data,
                 struct field_modify_info *info, uint32_t *mask,
                 uint32_t width, uint32_t *shift, struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr, struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t idx = 0;
        uint32_t off = 0;

        switch (data->field) {
        case RTE_FLOW_FIELD_START:
                /* not supported yet */
                MLX5_ASSERT(false);
                break;
        case RTE_FLOW_FIELD_MAC_DST:
                off = data->offset > 16 ? data->offset - 16 : 0;
                if (mask) {
                        if (data->offset < 16) {
                                info[idx] = (struct field_modify_info){2, 4,
                                                MLX5_MODI_OUT_DMAC_15_0};
                                if (width < 16) {
                                        mask[1] = rte_cpu_to_be_16(0xffff >>
                                                                 (16 - width));
                                        width = 0;
                                } else {
                                        mask[1] = RTE_BE16(0xffff);
                                        width -= 16;
                                }
                                if (!width)
                                        break;
                                ++idx;
                        }
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DMAC_47_16};
                        mask[0] = rte_cpu_to_be_32((0xffffffff >>
                                                    (32 - width)) << off);
                } else {
                        if (data->offset < 16)
                                info[idx++] = (struct field_modify_info){2, 4,
                                                MLX5_MODI_OUT_DMAC_15_0};
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DMAC_47_16};
                }
                break;
        case RTE_FLOW_FIELD_MAC_SRC:
                off = data->offset > 16 ? data->offset - 16 : 0;
                if (mask) {
                        if (data->offset < 16) {
                                info[idx] = (struct field_modify_info){2, 4,
                                                MLX5_MODI_OUT_SMAC_15_0};
                                if (width < 16) {
                                        mask[1] = rte_cpu_to_be_16(0xffff >>
                                                                 (16 - width));
                                        width = 0;
                                } else {
                                        mask[1] = RTE_BE16(0xffff);
                                        width -= 16;
                                }
                                if (!width)
                                        break;
                                ++idx;
                        }
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SMAC_47_16};
                        mask[0] = rte_cpu_to_be_32((0xffffffff >>
                                                    (32 - width)) << off);
                } else {
                        if (data->offset < 16)
                                info[idx++] = (struct field_modify_info){2, 4,
                                                MLX5_MODI_OUT_SMAC_15_0};
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SMAC_47_16};
                }
                break;
        case RTE_FLOW_FIELD_VLAN_TYPE:
                /* not supported yet */
                break;
        case RTE_FLOW_FIELD_VLAN_ID:
                info[idx] = (struct field_modify_info){2, 0,
                                        MLX5_MODI_OUT_FIRST_VID};
                if (mask)
                        mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
                break;
        case RTE_FLOW_FIELD_MAC_TYPE:
                info[idx] = (struct field_modify_info){2, 0,
                                        MLX5_MODI_OUT_ETHERTYPE};
                if (mask)
                        mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
                break;
        case RTE_FLOW_FIELD_IPV4_DSCP:
                info[idx] = (struct field_modify_info){1, 0,
                                        MLX5_MODI_OUT_IP_DSCP};
                if (mask)
                        mask[idx] = 0x3f >> (6 - width);
                break;
        case RTE_FLOW_FIELD_IPV4_TTL:
                info[idx] = (struct field_modify_info){1, 0,
                                        MLX5_MODI_OUT_IPV4_TTL};
                if (mask)
                        mask[idx] = 0xff >> (8 - width);
                break;
        case RTE_FLOW_FIELD_IPV4_SRC:
                info[idx] = (struct field_modify_info){4, 0,
                                        MLX5_MODI_OUT_SIPV4};
                if (mask)
                        mask[idx] = rte_cpu_to_be_32(0xffffffff >>
                                                     (32 - width));
                break;
        case RTE_FLOW_FIELD_IPV4_DST:
                info[idx] = (struct field_modify_info){4, 0,
                                        MLX5_MODI_OUT_DIPV4};
                if (mask)
                        mask[idx] = rte_cpu_to_be_32(0xffffffff >>
                                                     (32 - width));
                break;
        case RTE_FLOW_FIELD_IPV6_DSCP:
                info[idx] = (struct field_modify_info){1, 0,
                                        MLX5_MODI_OUT_IP_DSCP};
                if (mask)
                        mask[idx] = 0x3f >> (6 - width);
                break;
        case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
                info[idx] = (struct field_modify_info){1, 0,
                                        MLX5_MODI_OUT_IPV6_HOPLIMIT};
                if (mask)
                        mask[idx] = 0xff >> (8 - width);
                break;
        case RTE_FLOW_FIELD_IPV6_SRC:
                if (mask) {
                        if (data->offset < 32) {
                                info[idx] = (struct field_modify_info){4, 12,
                                                MLX5_MODI_OUT_SIPV6_31_0};
                                if (width < 32) {
                                        mask[3] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
                                        mask[3] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
                                        break;
                                ++idx;
                        }
                        if (data->offset < 64) {
                                info[idx] = (struct field_modify_info){4, 8,
                                                MLX5_MODI_OUT_SIPV6_63_32};
                                if (width < 32) {
                                        mask[2] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
                                        mask[2] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
                                        break;
                                ++idx;
                        }
                        if (data->offset < 96) {
                                info[idx] = (struct field_modify_info){4, 4,
                                                MLX5_MODI_OUT_SIPV6_95_64};
                                if (width < 32) {
                                        mask[1] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
                                        mask[1] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
                                        break;
                                ++idx;
                        }
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SIPV6_127_96};
                        mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
                } else {
                        if (data->offset < 32)
                                info[idx++] = (struct field_modify_info){4, 12,
                                                MLX5_MODI_OUT_SIPV6_31_0};
                        if (data->offset < 64)
                                info[idx++] = (struct field_modify_info){4, 8,
                                                MLX5_MODI_OUT_SIPV6_63_32};
                        if (data->offset < 96)
                                info[idx++] = (struct field_modify_info){4, 4,
                                                MLX5_MODI_OUT_SIPV6_95_64};
                        if (data->offset < 128)
                                info[idx++] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SIPV6_127_96};
                }
                break;
        case RTE_FLOW_FIELD_IPV6_DST:
                if (mask) {
                        if (data->offset < 32) {
                                info[idx] = (struct field_modify_info){4, 12,
                                                MLX5_MODI_OUT_DIPV6_31_0};
                                if (width < 32) {
                                        mask[3] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
                                        mask[3] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
                                        break;
                                ++idx;
                        }
                        if (data->offset < 64) {
                                info[idx] = (struct field_modify_info){4, 8,
                                                MLX5_MODI_OUT_DIPV6_63_32};
                                if (width < 32) {
                                        mask[2] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
                                        mask[2] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
                                        break;
                                ++idx;
                        }
                        if (data->offset < 96) {
                                info[idx] = (struct field_modify_info){4, 4,
                                                MLX5_MODI_OUT_DIPV6_95_64};
                                if (width < 32) {
                                        mask[1] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
                                        mask[1] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
                                        break;
                                ++idx;
                        }
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DIPV6_127_96};
                        mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
                } else {
                        if (data->offset < 32)
                                info[idx++] = (struct field_modify_info){4, 12,
                                                MLX5_MODI_OUT_DIPV6_31_0};
                        if (data->offset < 64)
                                info[idx++] = (struct field_modify_info){4, 8,
                                                MLX5_MODI_OUT_DIPV6_63_32};
                        if (data->offset < 96)
                                info[idx++] = (struct field_modify_info){4, 4,
                                                MLX5_MODI_OUT_DIPV6_95_64};
                        if (data->offset < 128)
                                info[idx++] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DIPV6_127_96};
                }
                break;
        case RTE_FLOW_FIELD_TCP_PORT_SRC:
                info[idx] = (struct field_modify_info){2, 0,
                                        MLX5_MODI_OUT_TCP_SPORT};
                if (mask)
                        mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
                break;
        case RTE_FLOW_FIELD_TCP_PORT_DST:
                info[idx] = (struct field_modify_info){2, 0,
                                        MLX5_MODI_OUT_TCP_DPORT};
                if (mask)
                        mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
                break;
        case RTE_FLOW_FIELD_TCP_SEQ_NUM:
                info[idx] = (struct field_modify_info){4, 0,
                                        MLX5_MODI_OUT_TCP_SEQ_NUM};
                if (mask)
                        mask[idx] = rte_cpu_to_be_32(0xffffffff >>
                                                     (32 - width));
                break;
        case RTE_FLOW_FIELD_TCP_ACK_NUM:
                info[idx] = (struct field_modify_info){4, 0,
                                        MLX5_MODI_OUT_TCP_ACK_NUM};
                if (mask)
                        mask[idx] = rte_cpu_to_be_32(0xffffffff >>
                                                     (32 - width));
                break;
        case RTE_FLOW_FIELD_TCP_FLAGS:
                info[idx] = (struct field_modify_info){2, 0,
                                        MLX5_MODI_OUT_TCP_FLAGS};
                if (mask)
                        mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
                break;
        case RTE_FLOW_FIELD_UDP_PORT_SRC:
                info[idx] = (struct field_modify_info){2, 0,
                                        MLX5_MODI_OUT_UDP_SPORT};
                if (mask)
                        mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
                break;
        case RTE_FLOW_FIELD_UDP_PORT_DST:
                info[idx] = (struct field_modify_info){2, 0,
                                        MLX5_MODI_OUT_UDP_DPORT};
                if (mask)
                        mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
                break;
        case RTE_FLOW_FIELD_VXLAN_VNI:
                /* not supported yet */
                break;
        case RTE_FLOW_FIELD_GENEVE_VNI:
                /* not supported yet */
                break;
        case RTE_FLOW_FIELD_GTP_TEID:
                info[idx] = (struct field_modify_info){4, 0,
                                        MLX5_MODI_GTP_TEID};
                if (mask)
                        mask[idx] = rte_cpu_to_be_32(0xffffffff >>
                                                     (32 - width));
                break;
        case RTE_FLOW_FIELD_TAG:
                {
                        int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
                                                   data->level, error);
                        if (reg < 0)
                                return;
                        MLX5_ASSERT(reg != REG_NON);
                        MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
                        info[idx] = (struct field_modify_info){4, 0,
                                                reg_to_field[reg]};
                        if (mask)
                                mask[idx] =
                                        rte_cpu_to_be_32(0xffffffff >>
                                                         (32 - width));
                }
                break;
        case RTE_FLOW_FIELD_MARK:
                {
                        uint32_t mark_mask = priv->sh->dv_mark_mask;
                        uint32_t mark_count = __builtin_popcount(mark_mask);
                        int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
                                                       0, error);
                        if (reg < 0)
                                return;
                        MLX5_ASSERT(reg != REG_NON);
                        MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
                        info[idx] = (struct field_modify_info){4, 0,
                                                reg_to_field[reg]};
                        if (mask)
                                mask[idx] = rte_cpu_to_be_32((mark_mask >>
                                         (mark_count - width)) & mark_mask);
                }
                break;
        case RTE_FLOW_FIELD_META:
                {
                        uint32_t meta_mask = priv->sh->dv_meta_mask;
                        uint32_t meta_count = __builtin_popcount(meta_mask);
                        uint32_t msk_c0 =
                                rte_cpu_to_be_32(priv->sh->dv_regc0_mask);
                        uint32_t shl_c0 = rte_bsf32(msk_c0);
                        int reg = flow_dv_get_metadata_reg(dev, attr, error);
                        if (reg < 0)
                                return;
                        MLX5_ASSERT(reg != REG_NON);
                        MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
                        if (reg == REG_C_0)
                                *shift = shl_c0;
                        info[idx] = (struct field_modify_info){4, 0,
                                                reg_to_field[reg]};
                        if (mask)
                                mask[idx] = rte_cpu_to_be_32((meta_mask >>
                                        (meta_count - width)) & meta_mask);
                }
                break;
        case RTE_FLOW_FIELD_POINTER:
        case RTE_FLOW_FIELD_VALUE:
        default:
                MLX5_ASSERT(false);
                break;
        }
}
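
/*
 * Worked example (illustrative note): for RTE_FLOW_FIELD_MAC_DST with
 * offset 0, width 48 and a non-NULL mask, the routine above produces
 *
 *   info[0] = {2, 4, MLX5_MODI_OUT_DMAC_15_0},  mask[1] = RTE_BE16(0xffff)
 *   info[1] = {4, 0, MLX5_MODI_OUT_DMAC_47_16}, mask[0] = RTE_BE32(0xffffffff)
 *
 * i.e. the info entries are filled in ascending order while the matching
 * mask words sit at the byte offsets (4 and 0) recorded in each entry,
 * which is how flow_dv_convert_modify_action() locates them.
 */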

/**
 * Convert modify_field action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_field
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_modify_field *conf =
                (const struct rte_flow_action_modify_field *)(action->conf);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = NULL
        };
        struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
                                                                {0, 0, 0} };
        struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
                                                                {0, 0, 0} };
        uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
        uint32_t type;
        uint32_t shift = 0;

        if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
            conf->src.field == RTE_FLOW_FIELD_VALUE) {
                type = MLX5_MODIFICATION_TYPE_SET;
                /* For SET fill the destination field (field) first. */
                mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
                                                  conf->width, &shift, dev,
                                                  attr, error);
                item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
                                        (void *)(uintptr_t)conf->src.pvalue :
                                        (void *)(uintptr_t)&conf->src.value;
        } else {
                type = MLX5_MODIFICATION_TYPE_COPY;
                /* For COPY fill the destination field (dcopy) without mask. */
                mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
                                                  conf->width, &shift, dev,
                                                  attr, error);
                /* Then construct the source field (field) with mask. */
                mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
                                                  conf->width, &shift,
                                                  dev, attr, error);
        }
        item.mask = &mask;
        return flow_dv_convert_modify_action(&item,
                        field, dcopy, resource, type, error);
}
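
/*
 * Usage sketch (illustrative; the chosen fields and tag level are
 * assumptions, the helper name is hypothetical): a modify_field action
 * configuration that the routine above would convert as a COPY, moving the
 * 32-bit TCP sequence number into TAG register 0. Only RTE_FLOW_FIELD_POINTER
 * and RTE_FLOW_FIELD_VALUE sources become immediate SETs.
 */
static __rte_unused void
flow_dv_sketch_modify_field_conf(void)
{
        static const struct rte_flow_action_modify_field conf = {
                .operation = RTE_FLOW_MODIFY_SET,
                .dst = {
                        .field = RTE_FLOW_FIELD_TAG,
                        .level = 0, /* TAG array index, an assumed value */
                },
                .src = {
                        .field = RTE_FLOW_FIELD_TCP_SEQ_NUM,
                },
                .width = 32, /* full field, see mlx5_flow_item_field_width() */
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
                .conf = &conf,
        };

        (void)action;
}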

/**
 * Validate MARK item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_mark(struct rte_eth_dev *dev,
                           const struct rte_flow_item *item,
                           const struct rte_flow_attr *attr __rte_unused,
                           struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;
        const struct rte_flow_item_mark *spec = item->spec;
        const struct rte_flow_item_mark *mask = item->mask;
        const struct rte_flow_item_mark nic_mask = {
                .id = priv->sh->dv_mark_mask,
        };
        int ret;

        if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "extended metadata feature"
                                          " isn't enabled");
        if (!mlx5_flow_ext_mreg_supported(dev))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "extended metadata register"
                                          " isn't supported");
        if (!nic_mask.id)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "extended metadata register"
                                          " isn't available");
        ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (ret < 0)
                return ret;
        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          item->spec,
                                          "data cannot be empty");
        if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &spec->id,
                                          "mark id exceeds the limit");
        if (!mask)
                mask = &nic_mask;
        if (!mask->id)
                return rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
                                        "mask cannot be zero");

        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_mark),
                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        return 0;
}
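
/*
 * Usage sketch (illustrative; the id is an assumed value and the variable
 * name is hypothetical): a MARK pattern item that passes the validation
 * above. Leaving the item mask NULL falls back to nic_mask, i.e. the
 * device's dv_mark_mask.
 */
static const struct rte_flow_item_mark flow_dv_sketch_mark_spec __rte_unused = {
        .id = 42,
};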

/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev,
                           const struct rte_flow_item *item,
                           const struct rte_flow_attr *attr,
                           struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;
        const struct rte_flow_item_meta *spec = item->spec;
        const struct rte_flow_item_meta *mask = item->mask;
        struct rte_flow_item_meta nic_mask = {
                .data = UINT32_MAX
        };
        int reg;
        int ret;

        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          item->spec,
                                          "data cannot be empty");
        if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
                if (!mlx5_flow_ext_mreg_supported(dev))
                        return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "extended metadata register"
                                          " isn't supported");
                reg = flow_dv_get_metadata_reg(dev, attr, error);
                if (reg < 0)
                        return reg;
                if (reg == REG_NON)
                        return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "unavailable extended metadata register");
                if (reg == REG_B)
                        return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "match on reg_b "
                                          "isn't supported");
                if (reg != REG_A)
                        nic_mask.data = priv->sh->dv_meta_mask;
        } else {
                if (attr->transfer)
                        return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "extended metadata feature "
                                        "should be enabled when "
                                        "meta item is requested "
                                        "with e-switch mode");
                if (attr->ingress)
                        return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "match on metadata for ingress "
                                        "is not supported in legacy "
                                        "metadata mode");
        }
        if (!mask)
                mask = &rte_flow_item_meta_mask;
        if (!mask->data)
                return rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
                                        "mask cannot be zero");

        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_meta),
                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        return ret;
}
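
/*
 * Usage sketch (illustrative; both values are assumptions and the variable
 * names are hypothetical): a META pattern item accepted by the validation
 * above. In extended metadata mode the usable bits are further limited by
 * the nic_mask derived from priv->sh->dv_meta_mask.
 */
static const struct rte_flow_item_meta flow_dv_sketch_meta_spec __rte_unused = {
        .data = 0xcafe,
};
static const struct rte_flow_item_meta flow_dv_sketch_meta_mask __rte_unused = {
        .data = 0xffff,
};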

/**
 * Validate TAG item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_tag(struct rte_eth_dev *dev,
                          const struct rte_flow_item *item,
                          const struct rte_flow_attr *attr __rte_unused,
                          struct rte_flow_error *error)
{
        const struct rte_flow_item_tag *spec = item->spec;
        const struct rte_flow_item_tag *mask = item->mask;
        const struct rte_flow_item_tag nic_mask = {
                .data = RTE_BE32(UINT32_MAX),
                .index = 0xff,
        };
        int ret;

        if (!mlx5_flow_ext_mreg_supported(dev))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "extended metadata register"
                                          " isn't supported");
        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          item->spec,
                                          "data cannot be empty");
        if (!mask)
                mask = &rte_flow_item_tag_mask;
        if (!mask->data)
                return rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
                                        "mask cannot be zero");

        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_tag),
                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        if (mask->index != 0xff)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
                                          "partial mask for tag index"
                                          " is not supported");
        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        return 0;
}
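
/*
 * Usage sketch (illustrative; data and index are assumed values and the
 * variable name is hypothetical): a TAG pattern item for application tag
 * register 0. Note the validation above insists on a full 0xff mask for
 * the index field.
 */
static const struct rte_flow_item_tag flow_dv_sketch_tag_spec __rte_unused = {
        .data = 0x1234,
        .index = 0,
};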

/**
 * Validate vport item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
                              const struct rte_flow_item *item,
                              const struct rte_flow_attr *attr,
                              uint64_t item_flags,
                              struct rte_flow_error *error)
{
        const struct rte_flow_item_port_id *spec = item->spec;
        const struct rte_flow_item_port_id *mask = item->mask;
        const struct rte_flow_item_port_id switch_mask = {
                        .id = 0xffffffff,
        };
        struct mlx5_priv *esw_priv;
        struct mlx5_priv *dev_priv;
        int ret;

        if (!attr->transfer)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL,
                                          "match on port id is valid only"
                                          " when transfer flag is enabled");
        if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple source ports are not"
                                          " supported");
        if (!mask)
                mask = &switch_mask;
        if (mask->id != 0xffffffff)
                return rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                           mask,
                                           "no support for partial mask on"
                                           " \"id\" field");
        ret = mlx5_flow_item_acceptable
                                (item, (const uint8_t *)mask,
                                 (const uint8_t *)&rte_flow_item_port_id_mask,
                                 sizeof(struct rte_flow_item_port_id),
                                 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret)
                return ret;
        if (!spec)
                return 0;
        if (spec->id == MLX5_PORT_ESW_MGR)
                return 0;
        esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
        if (!esw_priv)
                return rte_flow_error_set(error, rte_errno,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
                                          "failed to obtain E-Switch info for"
                                          " port");
        dev_priv = mlx5_dev_to_eswitch_info(dev);
        if (!dev_priv)
                return rte_flow_error_set(error, rte_errno,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "failed to obtain E-Switch info");
        if (esw_priv->domain_id != dev_priv->domain_id)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
                                          "cannot match on a port from a"
                                          " different E-Switch");
        return 0;
}
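
/*
 * Usage sketch (illustrative; the port id is an assumption and the variable
 * name is hypothetical): a PORT_ID pattern item as accepted above. It is
 * only valid on transfer flows, and a non-default mask must keep the "id"
 * field fully set.
 */
static const struct rte_flow_item_port_id flow_dv_sketch_port_spec __rte_unused = {
        .id = 1, /* assumed DPDK port id on the same E-Switch */
};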

/**
 * Validate VLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Ethernet device flow is being created on.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_vlan(const struct rte_flow_item *item,
                           uint64_t item_flags,
                           struct rte_eth_dev *dev,
                           struct rte_flow_error *error)
{
        const struct rte_flow_item_vlan *mask = item->mask;
        const struct rte_flow_item_vlan nic_mask = {
                .tci = RTE_BE16(UINT16_MAX),
                .inner_type = RTE_BE16(UINT16_MAX),
                .has_more_vlan = 1,
        };
        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        int ret;
        const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
                                        MLX5_FLOW_LAYER_INNER_L4) :
                                       (MLX5_FLOW_LAYER_OUTER_L3 |
                                        MLX5_FLOW_LAYER_OUTER_L4);
        const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
                                        MLX5_FLOW_LAYER_OUTER_VLAN;

        if (item_flags & vlanm)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple VLAN layers not supported");
        else if ((item_flags & l34m) != 0)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "VLAN cannot follow L3/L4 layer");
        if (!mask)
                mask = &rte_flow_item_vlan_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_vlan),
                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret)
                return ret;
        if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
                struct mlx5_priv *priv = dev->data->dev_private;

                if (priv->vmwa_context) {
                        /*
                         * A non-NULL context means we have a virtual
                         * machine with SR-IOV enabled, and we have to
                         * create a VLAN interface to make the hypervisor
                         * set up the E-Switch vport context correctly.
                         * We avoid creating multiple VLAN interfaces,
                         * so we cannot support a VLAN tag mask.
                         */
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  item,
                                                  "VLAN tag mask is not"
                                                  " supported in virtual"
                                                  " environment");
                }
        }
        return 0;
}
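
/*
 * Usage sketch (illustrative; the VLAN id is an assumption and the variable
 * names are hypothetical): a VLAN item with a VID-only TCI mask. Matching
 * exactly the 0x0fff mask avoids the virtual-environment restriction
 * handled above.
 */
static const struct rte_flow_item_vlan flow_dv_sketch_vlan_spec __rte_unused = {
        .tci = RTE_BE16(100),
};
static const struct rte_flow_item_vlan flow_dv_sketch_vlan_mask __rte_unused = {
        .tci = RTE_BE16(0x0fff),
};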

/*
 * GTP flags are contained in 1 byte of the format:
 * -------------------------------------------
 * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
 * |-----------------------------------------|
 * | value | Version | PT | Res | E | S | PN |
 * -------------------------------------------
 *
 * Matching is supported only for GTP flags E, S, PN.
 */
#define MLX5_GTP_FLAGS_MASK     0x07

/**
 * Validate GTP item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
                          const struct rte_flow_item *item,
                          uint64_t item_flags,
                          struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_item_gtp *spec = item->spec;
        const struct rte_flow_item_gtp *mask = item->mask;
        const struct rte_flow_item_gtp nic_mask = {
                .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
                .msg_type = 0xff,
                .teid = RTE_BE32(0xffffffff),
        };

        if (!priv->config.hca_attr.tunnel_stateless_gtp)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "GTP support is not enabled");
        if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple tunnel layers not"
                                          " supported");
        if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "no outer UDP layer found");
        if (!mask)
                mask = &rte_flow_item_gtp_mask;
        if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Match is supported for GTP"
                                          " flags only");
        return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                         (const uint8_t *)&nic_mask,
                                         sizeof(struct rte_flow_item_gtp),
                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
}
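
/*
 * Usage sketch (illustrative; the TEID is an assumed value and the variable
 * name is hypothetical): a GTP item matching G-PDU packets (message type
 * 0xff) with a given tunnel endpoint id, as accepted by the validation
 * above.
 */
static const struct rte_flow_item_gtp flow_dv_sketch_gtp_spec __rte_unused = {
        .msg_type = 0xff, /* G-PDU */
        .teid = RTE_BE32(1234),
};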

/**
 * Validate GTP PSC item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] gtp_item
 *   Previous GTP item specification.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
                              uint64_t last_item,
                              const struct rte_flow_item *gtp_item,
                              const struct rte_flow_attr *attr,
                              struct rte_flow_error *error)
{
        const struct rte_flow_item_gtp *gtp_spec;
        const struct rte_flow_item_gtp *gtp_mask;
        const struct rte_flow_item_gtp_psc *mask;
        const struct rte_flow_item_gtp_psc nic_mask = {
                .hdr.type = 0xF,
                .hdr.qfi = 0x3F,
        };

        if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
                         "GTP PSC item must be preceded by a GTP item");
        gtp_spec = gtp_item->spec;
        gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
        /* GTP spec is present and the E flag is requested to match zero. */
        if (gtp_spec &&
                (gtp_mask->v_pt_rsv_flags &
                ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
                         "GTP E flag must be 1 to match GTP PSC");
        /* Check the flow is not created in group zero. */
        if (!attr->transfer && !attr->group)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                         "GTP PSC is not supported for group 0");
        /* No PSC spec provided, nothing more to validate. */
        if (!item->spec)
                return 0;
        mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
        return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                         (const uint8_t *)&nic_mask,
                                         sizeof(struct rte_flow_item_gtp_psc),
                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
}
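
/*
 * Usage sketch (illustrative; the QFI is an assumed value and the variable
 * name is hypothetical): a GTP PSC item matching on the QoS flow identifier.
 * It must follow a GTP item whose E flag is set, per the checks above.
 */
static const struct rte_flow_item_gtp_psc flow_dv_sketch_gtp_psc_spec __rte_unused = {
        .hdr.qfi = 9,
};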

/**
 * Validate IPV4 item.
 * Use existing validation function mlx5_flow_validate_item_ipv4(), and
 * add specific validation of the fragment_offset field.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
                           const struct rte_flow_item *item,
                           uint64_t item_flags, uint64_t last_item,
                           uint16_t ether_type, struct rte_flow_error *error)
{
        int ret;
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *last = item->last;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        rte_be16_t fragment_offset_spec = 0;
        rte_be16_t fragment_offset_last = 0;
        struct rte_flow_item_ipv4 nic_ipv4_mask = {
                .hdr = {
                        .src_addr = RTE_BE32(0xffffffff),
                        .dst_addr = RTE_BE32(0xffffffff),
                        .type_of_service = 0xff,
                        .fragment_offset = RTE_BE16(0xffff),
                        .next_proto_id = 0xff,
                        .time_to_live = 0xff,
                },
        };

        if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
                int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
                bool ihl_cap = !tunnel ? priv->config.hca_attr.outer_ipv4_ihl :
                               priv->config.hca_attr.inner_ipv4_ihl;
                if (!ihl_cap)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  item,
                                                  "IPV4 ihl offload not supported");
                nic_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
        }
        ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
                                           ether_type, &nic_ipv4_mask,
                                           MLX5_ITEM_RANGE_ACCEPTED, error);
        if (ret < 0)
                return ret;
        if (spec && mask)
                fragment_offset_spec = spec->hdr.fragment_offset &
                                       mask->hdr.fragment_offset;
        if (!fragment_offset_spec)
                return 0;
        /*
         * spec and mask are valid, enforce using a full mask to make sure
         * the complete value is used correctly.
         */
        if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
                        != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                          item, "must use full mask for"
                                          " fragment_offset");
        /*
         * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
         * indicating this is the first fragment of a fragmented packet.
         * This is not yet supported in MLX5, return appropriate error message.
         */
        if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "match on first fragment not "
                                          "supported");
        if (fragment_offset_spec && !last)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "specified value not supported");
        /* spec and last are valid, validate the specified range. */
        fragment_offset_last = last->hdr.fragment_offset &
                               mask->hdr.fragment_offset;
        /*
         * Match on fragment_offset spec 0x2001 and last 0x3fff
         * means MF is 1 and frag-offset is > 0.
         * Such a packet is fragment 2 or onward, excluding the last one.
         * This is not yet supported in MLX5, return appropriate
         * error message.
         */
        if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
            fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST,
                                          last, "match on following "
                                          "fragments not supported");
        /*
         * Match on fragment_offset spec 0x0001 and last 0x1fff
         * means MF is 0 and frag-offset is > 0.
         * Such a packet is the last fragment of a fragmented packet.
         * This is not yet supported in MLX5, return appropriate
         * error message.
         */
        if (fragment_offset_spec == RTE_BE16(1) &&
            fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST,
                                          last, "match on last "
                                          "fragment not supported");
        /*
         * Match on fragment_offset spec 0x0001 and last 0x3fff
         * means MF and/or frag-offset is not 0.
         * This is a fragmented packet.
         * Other range values are invalid and rejected.
         */
        if (!(fragment_offset_spec == RTE_BE16(1) &&
              fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
                                          "specified range not supported");
        return 0;
}
2533
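/*
 * Illustrative usage sketch (not part of the driver): the only
 * fragment_offset range accepted above is spec 0x0001 with last 0x3fff
 * under the full 0x3fff mask, i.e. "any fragmented packet":
 *
 *        struct rte_flow_item_ipv4 frag_spec = {
 *                .hdr.fragment_offset = RTE_BE16(0x0001),
 *        };
 *        struct rte_flow_item_ipv4 frag_last = {
 *                .hdr.fragment_offset = RTE_BE16(0x3fff),
 *        };
 *        struct rte_flow_item_ipv4 frag_mask = {
 *                .hdr.fragment_offset = RTE_BE16(0x3fff),
 *        };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &frag_spec,
 *                  .last = &frag_last, .mask = &frag_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *
 * Matching only the first fragment (spec 0x2000), only following fragments
 * or only the last fragment is rejected with ENOTSUP.
 */
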
/**
 * Validate IPV6 fragment extension item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
                                    uint64_t item_flags,
                                    struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
        const struct rte_flow_item_ipv6_frag_ext *last = item->last;
        const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
        rte_be16_t frag_data_spec = 0;
        rte_be16_t frag_data_last = 0;
        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
                                      MLX5_FLOW_LAYER_OUTER_L4;
        int ret = 0;
        struct rte_flow_item_ipv6_frag_ext nic_mask = {
                .hdr = {
                        .next_header = 0xff,
                        .frag_data = RTE_BE16(0xffff),
                },
        };

        if (item_flags & l4m)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "ipv6 fragment extension item cannot "
                                          "follow L4 item.");
        if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
            (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "ipv6 fragment extension item must "
                                          "follow ipv6 item");
        if (spec && mask)
                frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
        if (!frag_data_spec)
                return 0;
        /*
         * spec and mask are valid, enforce using full mask to make sure the
         * complete value is used correctly.
         */
        if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
                                RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                          item, "must use full mask for"
                                          " frag_data");
        /*
         * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
         * This is the first fragment of a fragmented packet.
         */
        if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "match on first fragment not "
                                          "supported");
        if (frag_data_spec && !last)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "specified value not supported");
        ret = mlx5_flow_item_acceptable
                                (item, (const uint8_t *)mask,
                                 (const uint8_t *)&nic_mask,
                                 sizeof(struct rte_flow_item_ipv6_frag_ext),
                                 MLX5_ITEM_RANGE_ACCEPTED, error);
        if (ret)
                return ret;
        /* spec and last are valid, validate the specified range. */
        frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
        /*
         * Match on frag_data spec 0x0009 and last 0xfff9
         * means M is 1 and frag-offset is > 0.
         * Such a packet is any fragment from the second one onward,
         * excluding the last.
         * This is not yet supported in MLX5, return appropriate
         * error message.
         */
        if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
                                       RTE_IPV6_EHDR_MF_MASK) &&
            frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST,
                                          last, "match on following "
                                          "fragments not supported");
        /*
         * Match on frag_data spec 0x0008 and last 0xfff8
         * means M is 0 and frag-offset is > 0.
         * Such a packet is the last fragment of a fragmented packet.
         * This is not yet supported in MLX5, return appropriate
         * error message.
         */
        if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
            frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST,
                                          last, "match on last "
                                          "fragment not supported");
        /* Other range values are invalid and rejected. */
        return rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
                                  "specified range not supported");
}

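/*
 * Illustrative usage sketch (not part of the driver): with the checks
 * above, every non-zero frag_data spec/last combination is rejected, so
 * the practical way to match IPv6 fragments is the mere presence of the
 * fragment extension item, e.g. "any IPv6 fragment":
 *
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV6 },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *
 * The extension item must directly describe an IPv6 flow and cannot
 * follow an L4 item, as enforced above.
 */
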
/**
 * Validate ASO CT item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Pointer to bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
                             const struct rte_flow_item *item,
                             uint64_t *item_flags,
                             struct rte_flow_error *error)
{
        const struct rte_flow_item_conntrack *spec = item->spec;
        const struct rte_flow_item_conntrack *mask = item->mask;
        uint32_t flags;

        RTE_SET_USED(dev);
        if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                          "Only one CT is supported");
        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "CT item spec cannot be empty");
        if (!mask)
                mask = &rte_flow_item_conntrack_mask;
        flags = spec->flags & mask->flags;
        if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
            ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
             (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
             (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                          "Conflict status bits");
        /* State change also needs to be considered. */
        *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
        return 0;
}

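/*
 * Illustrative usage sketch (not part of the driver): a CONNTRACK item
 * selecting packets recognized as part of a valid connection. VALID must
 * not be combined with INVALID/BAD/DISABLED, as checked above. With a
 * NULL mask the default rte_flow_item_conntrack_mask applies:
 *
 *        struct rte_flow_item_conntrack ct_spec = {
 *                .flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
 *        };
 *        struct rte_flow_item ct_item = {
 *                .type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
 *                .spec = &ct_spec,
 *        };
 */
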
/**
 * Validate the pop VLAN action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the pop vlan action.
 * @param[in] item_flags
 *   The items found in this flow rule.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
                                 uint64_t action_flags,
                                 const struct rte_flow_action *action,
                                 uint64_t item_flags,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        const struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        bool direction_error = false;

        if (!sh->pop_vlan_action)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "pop vlan action is not supported");
        /* Pop VLAN is not supported in egress except for CX6 FDB mode. */
        if (attr->transfer) {
                bool fdb_tx = priv->representor_id != UINT16_MAX;
                bool is_cx5 = sh->steering_format_version ==
                    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;

                if (fdb_tx && is_cx5)
                        direction_error = true;
        } else if (attr->egress) {
                direction_error = true;
        }
        if (direction_error)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          NULL,
                                          "pop vlan action not supported for egress");
        if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "no support for multiple VLAN "
                                          "actions");
        /* Pop VLAN with preceding Decap requires inner header with VLAN. */
        if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
            !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "cannot pop vlan after decap without "
                                          "match on inner vlan in the flow");
        /* Pop VLAN without preceding Decap requires outer header with VLAN. */
        if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
            !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "cannot pop vlan without a "
                                          "match on (outer) vlan in the flow");
        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, port_id should "
                                          "be after pop VLAN action");
        if (!attr->transfer && priv->representor)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "pop vlan action for VF representor "
                                          "not supported on NIC table");
        return 0;
}

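/*
 * Illustrative usage sketch (not part of the driver): an ingress rule
 * popping the outer VLAN tag. Per the validation above, the pattern must
 * match on the (outer) VLAN being popped:
 *
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_VLAN },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_queue queue = { .index = 0 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */
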
/**
 * Get VLAN default info from vlan match info.
 *
 * @param[in] items
 *   The list of item specifications.
 * @param[out] vlan
 *   Pointer to the VLAN info to fill.
 */
static void
flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
                                  struct rte_vlan_hdr *vlan)
{
        const struct rte_flow_item_vlan nic_mask = {
                .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
                                MLX5DV_FLOW_VLAN_VID_MASK),
                .inner_type = RTE_BE16(0xffff),
        };

        if (items == NULL)
                return;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                int type = items->type;

                if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
                    type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
                        break;
        }
        if (items->type != RTE_FLOW_ITEM_TYPE_END) {
                const struct rte_flow_item_vlan *vlan_m = items->mask;
                const struct rte_flow_item_vlan *vlan_v = items->spec;

                /* If the VLAN item in the pattern has no data, return here. */
                if (!vlan_v)
                        return;
                if (!vlan_m)
                        vlan_m = &nic_mask;
                /* Only full match values are accepted. */
                if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
                     MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
                        vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                        vlan->vlan_tci |=
                                rte_be_to_cpu_16(vlan_v->tci &
                                                 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
                }
                if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
                     MLX5DV_FLOW_VLAN_VID_MASK_BE) {
                        vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                        vlan->vlan_tci |=
                                rte_be_to_cpu_16(vlan_v->tci &
                                                 MLX5DV_FLOW_VLAN_VID_MASK_BE);
                }
                if (vlan_m->inner_type == nic_mask.inner_type)
                        vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
                                                           vlan_m->inner_type);
        }
}

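/*
 * Illustrative sketch (not part of the driver): the TCI decomposition
 * performed above, for a VLAN item with tci = RTE_BE16(0xe00a):
 *
 *        PCP = (0xe00a & MLX5DV_FLOW_VLAN_PCP_MASK) >> 13 = 7
 *        VID =  0xe00a & MLX5DV_FLOW_VLAN_VID_MASK        = 0x00a (10)
 *
 * Each field is copied into vlan->vlan_tci only when its mask bits are
 * fully set; otherwise the existing defaults are kept.
 */
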
/**
 * Validate the push VLAN action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] vlan_m
 *   VLAN item mask from the flow pattern, or NULL if not present.
 * @param[in] action
 *   Pointer to the action structure.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
                                  uint64_t action_flags,
                                  const struct rte_flow_item_vlan *vlan_m,
                                  const struct rte_flow_action *action,
                                  const struct rte_flow_attr *attr,
                                  struct rte_flow_error *error)
{
        const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
        const struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        bool direction_error = false;

        if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
            push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "invalid vlan ethertype");
        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, port_id should "
                                          "be after push VLAN");
        /* Push VLAN is not supported in ingress except for CX6 FDB mode. */
        if (attr->transfer) {
                bool fdb_tx = priv->representor_id != UINT16_MAX;
                bool is_cx5 = sh->steering_format_version ==
                    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;

                if (!fdb_tx && is_cx5)
                        direction_error = true;
        } else if (attr->ingress) {
                direction_error = true;
        }
        if (direction_error)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "push vlan action not supported for ingress");
        if (!attr->transfer && priv->representor)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "push vlan action for VF representor "
                                          "not supported on NIC table");
        if (vlan_m &&
            (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
            (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
                MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
            !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
            !(mlx5_flow_find_action
                (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "not full match mask on VLAN PCP and "
                                          "there is no of_set_vlan_pcp action, "
                                          "push VLAN action cannot figure out "
                                          "PCP value");
        if (vlan_m &&
            (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
            (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
                MLX5DV_FLOW_VLAN_VID_MASK_BE &&
            !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
            !(mlx5_flow_find_action
                (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "not full match mask on VLAN VID and "
                                          "there is no of_set_vlan_vid action, "
                                          "push VLAN action cannot figure out "
                                          "VID value");
        return 0;
}

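/*
 * Illustrative usage sketch (not part of the driver): an egress rule
 * pushing an 802.1Q tag. Since no VLAN item supplies PCP/VID here,
 * explicit of_set_vlan_pcp/of_set_vlan_vid actions must follow the push,
 * as required above (and validated by the two helpers below):
 *
 *        struct rte_flow_action_of_push_vlan push = {
 *                .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *        };
 *        struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
 *        struct rte_flow_action_of_set_vlan_vid vid = {
 *                .vlan_vid = RTE_BE16(100),
 *        };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
 *                  .conf = &push },
 *                { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
 *                  .conf = &pcp },
 *                { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
 *                  .conf = &vid },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */
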
/**
 * Validate the set VLAN PCP.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] actions
 *   Pointer to the list of actions remaining in the flow rule.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
                                     const struct rte_flow_action actions[],
                                     struct rte_flow_error *error)
{
        const struct rte_flow_action *action = actions;
        const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;

        if (conf->vlan_pcp > 7)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "VLAN PCP value is too big");
        if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "set VLAN PCP action must follow "
                                          "the push VLAN action");
        if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "Multiple VLAN PCP modifications are "
                                          "not supported");
        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, port_id should "
                                          "be after set VLAN PCP");
        return 0;
}

/**
 * Validate the set VLAN VID.
 *
 * @param[in] item_flags
 *   Holds the items detected in this rule.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] actions
 *   Pointer to the list of actions remaining in the flow rule.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
                                     uint64_t action_flags,
                                     const struct rte_flow_action actions[],
                                     struct rte_flow_error *error)
{
        const struct rte_flow_action *action = actions;
        const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;

        if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "VLAN VID value is too big");
        if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
            !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "set VLAN VID action must follow push"
                                          " VLAN action or match on VLAN item");
        if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "Multiple VLAN VID modifications are "
                                          "not supported");
        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, port_id should "
                                          "be after set VLAN VID");
        return 0;
}

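/*
 * Illustrative usage sketch (not part of the driver): of_set_vlan_vid may
 * also rewrite an existing tag without a preceding push, provided the
 * pattern matches on a VLAN item, as allowed above:
 *
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_VLAN },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_of_set_vlan_vid vid = {
 *                .vlan_vid = RTE_BE16(42),
 *        };
 */
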
/**
 * Validate the FLAG action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_flag(struct rte_eth_dev *dev,
                             uint64_t action_flags,
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;
        int ret;

        /* Fall back if no extended metadata register support. */
        if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
                return mlx5_flow_validate_action_flag(action_flags, attr,
                                                      error);
        /* Extensive metadata mode requires registers. */
        if (!mlx5_flow_ext_mreg_supported(dev))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "no metadata registers "
                                          "to support flag action");
        if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "extended metadata register"
                                          " isn't available");
        ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret > 0);
        if (action_flags & MLX5_FLOW_ACTION_MARK)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't mark and flag in same flow");
        if (action_flags & MLX5_FLOW_ACTION_FLAG)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 flag"
                                          " actions in same flow");
        return 0;
}

/**
 * Validate MARK action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_mark(struct rte_eth_dev *dev,
                             const struct rte_flow_action *action,
                             uint64_t action_flags,
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;
        const struct rte_flow_action_mark *mark = action->conf;
        int ret;

        if (is_tunnel_offload_active(dev))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "no mark action "
                                          "if tunnel offload active");
        /* Fall back if no extended metadata register support. */
        if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
                return mlx5_flow_validate_action_mark(action, action_flags,
                                                      attr, error);
        /* Extensive metadata mode requires registers. */
        if (!mlx5_flow_ext_mreg_supported(dev))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "no metadata registers "
                                          "to support mark action");
        if (!priv->sh->dv_mark_mask)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "extended metadata register"
                                          " isn't available");
        ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret > 0);
        if (!mark)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "configuration cannot be null");
        if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &mark->id,
                                          "mark id exceeds the limit");
        if (action_flags & MLX5_FLOW_ACTION_FLAG)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't flag and mark in same flow");
        if (action_flags & MLX5_FLOW_ACTION_MARK)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 mark actions in same"
                                          " flow");
        return 0;
}

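/*
 * Illustrative usage sketch (not part of the driver): marking matched
 * packets so the value is delivered in mbuf->hash.fdir.hi on receive.
 * FLAG and MARK are mutually exclusive within one flow, as enforced
 * above:
 *
 *        struct rte_flow_action_mark mark = { .id = 0xbeef };
 *        struct rte_flow_action_queue queue = { .index = 0 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */
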
/**
 * Validate SET_META action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action
 *   Pointer to the action structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *action,
                                 uint64_t action_flags __rte_unused,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;
        const struct rte_flow_action_set_meta *conf;
        uint32_t nic_mask = UINT32_MAX;
        int reg;

        if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
            !mlx5_flow_ext_mreg_supported(dev))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "extended metadata register"
                                          " isn't supported");
        reg = flow_dv_get_metadata_reg(dev, attr, error);
        if (reg < 0)
                return reg;
        if (reg == REG_NON)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "unavailable extended metadata register");
        if (reg != REG_A && reg != REG_B)
                nic_mask = priv->sh->dv_meta_mask;
        if (!(action->conf))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "configuration cannot be null");
        conf = (const struct rte_flow_action_set_meta *)action->conf;
        if (!conf->mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "zero mask doesn't have any effect");
        if (conf->mask & ~nic_mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "metadata must be within reg C0");
        return 0;
}

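/*
 * Illustrative usage sketch (not part of the driver): setting metadata
 * bits in the flow engine. Only the bits set in .mask are written, and a
 * zero mask is rejected above:
 *
 *        struct rte_flow_action_set_meta meta = {
 *                .data = 0x1234,
 *                .mask = 0xffff,
 *        };
 *        struct rte_flow_action action = {
 *                .type = RTE_FLOW_ACTION_TYPE_SET_META,
 *                .conf = &meta,
 *        };
 */
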
/**
 * Validate SET_TAG action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action
 *   Pointer to the action structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
                                const struct rte_flow_action *action,
                                uint64_t action_flags,
                                const struct rte_flow_attr *attr,
                                struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tag *conf;
        const uint64_t terminal_action_flags =
                MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
                MLX5_FLOW_ACTION_RSS;
        int ret;

        if (!mlx5_flow_ext_mreg_supported(dev))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "extensive metadata register"
                                          " isn't supported");
        if (!(action->conf))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "configuration cannot be null");
        conf = (const struct rte_flow_action_set_tag *)action->conf;
        if (!conf->mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "zero mask doesn't have any effect");
        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        if (!attr->transfer && attr->ingress &&
            (action_flags & terminal_action_flags))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "set_tag has no effect"
                                          " with terminal actions");
        return 0;
}

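/*
 * Illustrative usage sketch (not part of the driver): writing an
 * application tag into metadata register index 0, typically combined with
 * a non-terminal action such as JUMP so a later flow can match the TAG
 * item (set_tag with terminal actions is rejected above):
 *
 *        struct rte_flow_action_set_tag tag = {
 *                .data = 0xa5,
 *                .mask = 0xff,
 *                .index = 0,
 *        };
 *        struct rte_flow_action_jump jump = { .group = 1 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_SET_TAG, .conf = &tag },
 *                { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */
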
/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] shared
 *   Indicator if action is shared.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
                              uint64_t action_flags,
                              struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (!priv->sh->devx)
                goto notsup_err;
        if (action_flags & MLX5_FLOW_ACTION_COUNT)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "duplicate count actions set");
        if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
            !priv->sh->flow_hit_aso_en)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "old age and shared count combination is not supported");
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
        return 0;
#endif
notsup_err:
        return rte_flow_error_set
                      (error, ENOTSUP,
                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                       NULL,
                       "count action not supported");
}

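/*
 * Illustrative usage sketch (not part of the driver): attaching a counter
 * and reading it back through the generic query API. port_id, flow and
 * error are application-side assumptions:
 *
 *        struct rte_flow_action_count count_conf = { .id = 0 };
 *        struct rte_flow_action count_action = {
 *                .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *                .conf = &count_conf,
 *        };
 *        struct rte_flow_query_count stats = { .reset = 1 };
 *
 *        rte_flow_query(port_id, flow, &count_action, &stats, &error);
 *
 * On return, stats.hits and stats.bytes hold the counter values.
 */
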
/**
 * Validate the L2 encap action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the action structure.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
                                 uint64_t action_flags,
                                 const struct rte_flow_action *action,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        const struct mlx5_priv *priv = dev->data->dev_private;

        if (!(action->conf))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "configuration cannot be null");
        if (action_flags & MLX5_FLOW_ACTION_ENCAP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap action "
                                          "in a flow");
        if (!attr->transfer && priv->representor)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "encap action for VF representor "
                                          "not supported on NIC table");
        return 0;
}

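/*
 * Illustrative usage sketch (not part of the driver): an L2 encapsulation
 * such as RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP describes the outer headers as
 * a flow-item list; all values below are arbitrary placeholders:
 *
 *        struct rte_flow_item_eth out_eth = { .type = RTE_BE16(0x0800) };
 *        struct rte_flow_item_ipv4 out_ip = {
 *                .hdr.src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
 *                .hdr.dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 2)),
 *        };
 *        struct rte_flow_item_udp out_udp = {
 *                .hdr.dst_port = RTE_BE16(4789),
 *        };
 *        struct rte_flow_item_vxlan out_vx = { .vni = { 0, 0, 1 } };
 *        struct rte_flow_item outer[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &out_eth },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &out_ip },
 *                { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &out_udp },
 *                { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &out_vx },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_vxlan_encap vxlan_conf = {
 *                .definition = outer,
 *        };
 *
 * Only a single encap action per flow is accepted, as checked above.
 */
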
/**
 * Validate a decap action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the action structure.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_decap(struct rte_eth_dev *dev,
                              uint64_t action_flags,
                              const struct rte_flow_action *action,
                              const uint64_t item_flags,
                              const struct rte_flow_attr *attr,
                              struct rte_flow_error *error)
{
        const struct mlx5_priv *priv = dev->data->dev_private;

        if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
            !priv->config.decap_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "decap is not enabled");
        if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          action_flags &
                                          MLX5_FLOW_ACTION_DECAP ? "can only "
                                          "have a single decap action" : "decap "
                                          "after encap is not supported");
        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have decap action after"
                                          " modify action");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          NULL,
                                          "decap action not supported for "
                                          "egress");
        if (!attr->transfer && priv->representor)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "decap action for VF representor "
                                          "not supported on NIC table");
        if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
            !(item_flags & MLX5_FLOW_LAYER_VXLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "VXLAN item should be present for VXLAN decap");
        return 0;
}

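/*
 * Illustrative usage sketch (not part of the driver): VXLAN
 * decapsulation requires the VXLAN item in the pattern, per the last
 * check above:
 *
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *                { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *                { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_queue queue = { .index = 0 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */
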
const struct rte_flow_action_raw_decap empty_decap = { .data = NULL, .size = 0 };

/**
 * Validate the raw encap and decap actions.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] decap
 *   Pointer to the decap action.
 * @param[in] encap
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[in, out] action_flags
 *   Holds the actions detected until now.
 * @param[in, out] actions_n
 *   Pointer to the actions counter, incremented per detected action.
 * @param[in] action
 *   Pointer to the action structure.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_encap_decap
        (struct rte_eth_dev *dev,
         const struct rte_flow_action_raw_decap *decap,
         const struct rte_flow_action_raw_encap *encap,
         const struct rte_flow_attr *attr, uint64_t *action_flags,
         int *actions_n, const struct rte_flow_action *action,
         uint64_t item_flags, struct rte_flow_error *error)
{
        const struct mlx5_priv *priv = dev->data->dev_private;
        int ret;

        if (encap && (!encap->size || !encap->data))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "raw encap data cannot be empty");
        if (decap && encap) {
                if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
                    encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
                        /* L3 encap. */
                        decap = NULL;
                else if (encap->size <=
                           MLX5_ENCAPSULATION_DECISION_SIZE &&
                           decap->size >
                           MLX5_ENCAPSULATION_DECISION_SIZE)
                        /* L3 decap. */
                        encap = NULL;
                else if (encap->size >
                           MLX5_ENCAPSULATION_DECISION_SIZE &&
                           decap->size >
                           MLX5_ENCAPSULATION_DECISION_SIZE)
                        /* 2 L2 actions: encap and decap. */
                        ;
                else
                        return rte_flow_error_set(error,
                                ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                NULL, "unsupported too small "
                                "raw decap and too small raw "
                                "encap combination");
        }
        if (decap) {
                ret = flow_dv_validate_action_decap(dev, *action_flags, action,
                                                    item_flags, attr, error);
                if (ret < 0)
                        return ret;
                *action_flags |= MLX5_FLOW_ACTION_DECAP;
                ++(*actions_n);
        }
        if (encap) {
                if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "small raw encap size");
                if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "more than one encap action");
                if (!attr->transfer && priv->representor)
                        return rte_flow_error_set
                                        (error, ENOTSUP,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "encap action for VF representor "
                                         "not supported on NIC table");
                *action_flags |= MLX5_FLOW_ACTION_ENCAP;
                ++(*actions_n);
        }
        return 0;
}

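/*
 * Illustrative usage sketch (not part of the driver): an L3 encapsulation
 * as classified above, i.e. a raw decap not larger than
 * MLX5_ENCAPSULATION_DECISION_SIZE (strip only the L2 header) combined
 * with a larger raw encap that prepends a prebuilt outer
 * Ethernet/IPv4/UDP/tunnel header stack; the buffer and its size are
 * application-side placeholders:
 *
 *        uint8_t outer_hdrs[64];
 *        struct rte_flow_action_raw_decap decap = {
 *                .data = NULL,
 *                .size = sizeof(struct rte_ether_hdr),
 *        };
 *        struct rte_flow_action_raw_encap encap = {
 *                .data = outer_hdrs,
 *                .size = sizeof(outer_hdrs),
 *        };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
 *                { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */
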
/**
 * Validate the ASO CT action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] item_flags
 *   The items found in this flow rule.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
                               uint64_t action_flags,
                               uint64_t item_flags,
                               const struct rte_flow_attr *attr,
                               struct rte_flow_error *error)
{
        RTE_SET_USED(dev);

        if (attr->group == 0 && !attr->transfer)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "Only support non-root table");
        if (action_flags & MLX5_FLOW_FATE_ACTIONS)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "CT cannot follow a fate action");
        if ((action_flags & MLX5_FLOW_ACTION_METER) ||
            (action_flags & MLX5_FLOW_ACTION_AGE))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "Only one ASO action is supported");
        if (action_flags & MLX5_FLOW_ACTION_ENCAP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "Encap cannot exist before CT");
        if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "Not an outer TCP packet");
        return 0;
}

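/*
 * Illustrative usage sketch (not part of the driver): a CT action must
 * live in a non-root table and follow a TCP match, per the checks above.
 * Typically the conntrack context is created first as an indirect action
 * (rte_flow_action_handle_create() with RTE_FLOW_ACTION_TYPE_CONNTRACK)
 * and the returned handle is then referenced; ct_handle below is such an
 * application-side handle:
 *
 *        struct rte_flow_action_jump jump = { .group = 5 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_INDIRECT,
 *                  .conf = ct_handle },
 *                { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */
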
int
flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
                             struct mlx5_list_entry *entry, void *cb_ctx)
{
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
        struct mlx5_flow_dv_encap_decap_resource *resource;

        resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
                                entry);
        if (resource->reformat_type == ctx_resource->reformat_type &&
            resource->ft_type == ctx_resource->ft_type &&
            resource->flags == ctx_resource->flags &&
            resource->size == ctx_resource->size &&
            !memcmp((const void *)resource->buf,
                    (const void *)ctx_resource->buf,
                    resource->size))
                return 0;
        return -1;
}

3595 struct mlx5_list_entry *
3596 flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
3597 {
3598         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3599         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3600         struct mlx5dv_dr_domain *domain;
3601         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3602         struct mlx5_flow_dv_encap_decap_resource *resource;
3603         uint32_t idx;
3604         int ret;
3605
3606         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3607                 domain = sh->fdb_domain;
3608         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3609                 domain = sh->rx_domain;
3610         else
3611                 domain = sh->tx_domain;
3612         /* Register new encap/decap resource. */
3613         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
3614         if (!resource) {
3615                 rte_flow_error_set(ctx->error, ENOMEM,
3616                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3617                                    "cannot allocate resource memory");
3618                 return NULL;
3619         }
3620         *resource = *ctx_resource;
3621         resource->idx = idx;
3622         ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->cdev->ctx,
3623                                                               domain, resource,
3624                                                              &resource->action);
3625         if (ret) {
3626                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3627                 rte_flow_error_set(ctx->error, ENOMEM,
3628                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3629                                    NULL, "cannot create action");
3630                 return NULL;
3631         }
3632
3633         return &resource->entry;
3634 }
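/*
 * Editor's note: the reformat action above is created in the steering
 * domain matching the flow table type:
 *
 *	MLX5DV_FLOW_TABLE_TYPE_FDB    -> sh->fdb_domain (E-Switch)
 *	MLX5DV_FLOW_TABLE_TYPE_NIC_RX -> sh->rx_domain
 *	any other type                -> sh->tx_domain
 */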
3635
3636 struct mlx5_list_entry *
3637 flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
3638                              void *cb_ctx)
3639 {
3640         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3641         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3642         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3643         uint32_t idx;
3644
3645         cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3646                                            &idx);
3647         if (!cache_resource) {
3648                 rte_flow_error_set(ctx->error, ENOMEM,
3649                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3650                                    "cannot allocate resource memory");
3651                 return NULL;
3652         }
3653         memcpy(cache_resource, oentry, sizeof(*cache_resource));
3654         cache_resource->idx = idx;
3655         return &cache_resource->entry;
3656 }
3657
3658 void
3659 flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3660 {
3661         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3662         struct mlx5_flow_dv_encap_decap_resource *res =
3663                                        container_of(entry, typeof(*res), entry);
3664
3665         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
3666 }
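/*
 * Editor's note: the clone/clone_free callbacks back the per-lcore caches
 * of mlx5_list/mlx5_hlist. A sketch of the assumed lookup path:
 *
 *	// Global hit, local miss: duplicate only the small descriptor.
 *	local = clone_cb(sh, global_entry, &ctx);
 *	// The underlying rdma-core action object stays shared; only the
 *	// ipool-allocated descriptor is copied per lcore.
 */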
3667
3668 /**
3669  * Find existing encap/decap resource or create and register a new one.
3670  *
3671  * @param[in, out] dev
3672  *   Pointer to rte_eth_dev structure.
3673  * @param[in, out] resource
3674  *   Pointer to encap/decap resource.
3675  * @param[in, out] dev_flow
3676  *   Pointer to the dev_flow.
3677  * @param[out] error
3678  *   Pointer to error structure.
3679  *
3680  * @return
3681  *   0 on success, a negative errno value otherwise and rte_errno is set.
3682  */
3683 static int
3684 flow_dv_encap_decap_resource_register
3685                         (struct rte_eth_dev *dev,
3686                          struct mlx5_flow_dv_encap_decap_resource *resource,
3687                          struct mlx5_flow *dev_flow,
3688                          struct rte_flow_error *error)
3689 {
3690         struct mlx5_priv *priv = dev->data->dev_private;
3691         struct mlx5_dev_ctx_shared *sh = priv->sh;
3692         struct mlx5_list_entry *entry;
3693         union {
3694                 struct {
3695                         uint32_t ft_type:8;
3696                         uint32_t refmt_type:8;
3697                         /*
3698                          * Header reformat actions can be shared between
3699                          * non-root tables. One bit indicates whether
3700                          * the table is a non-root one.
3701                          */
3702                         uint32_t is_root:1;
3703                         uint32_t reserve:15;
3704                 };
3705                 uint32_t v32;
3706         } encap_decap_key = {
3707                 {
3708                         .ft_type = resource->ft_type,
3709                         .refmt_type = resource->reformat_type,
3710                         .is_root = !!dev_flow->dv.group,
3711                         .reserve = 0,
3712                 }
3713         };
3714         struct mlx5_flow_cb_ctx ctx = {
3715                 .error = error,
3716                 .data = resource,
3717         };
3718         struct mlx5_hlist *encaps_decaps;
3719         uint64_t key64;
3720
3721         encaps_decaps = flow_dv_hlist_prepare(sh, &sh->encaps_decaps,
3722                                 "encaps_decaps",
3723                                 MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
3724                                 true, true, sh,
3725                                 flow_dv_encap_decap_create_cb,
3726                                 flow_dv_encap_decap_match_cb,
3727                                 flow_dv_encap_decap_remove_cb,
3728                                 flow_dv_encap_decap_clone_cb,
3729                                 flow_dv_encap_decap_clone_free_cb);
3730         if (unlikely(!encaps_decaps))
3731                 return -rte_errno;
3732         resource->flags = dev_flow->dv.group ? 0 : 1;
3733         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3734                                  sizeof(encap_decap_key.v32), 0);
3735         if (resource->reformat_type !=
3736             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3737             resource->size)
3738                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3739         entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
3740         if (!entry)
3741                 return -rte_errno;
3742         resource = container_of(entry, typeof(*resource), entry);
3743         dev_flow->dv.encap_decap = resource;
3744         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3745         return 0;
3746 }
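/*
 * Editor's sketch of the hash-key construction above: the packed 32-bit
 * header (ft_type/refmt_type/is_root) is checksummed first and the raw
 * reformat buffer is folded in whenever the type carries data:
 *
 *	key64 = __rte_raw_cksum(&encap_decap_key.v32,
 *				sizeof(encap_decap_key.v32), 0);
 *	if (type != L2_TUNNEL_TO_L2 && resource->size)
 *		key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
 *	entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
 */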
3747
3748 /**
3749  * Find existing table jump resource or create and register a new one.
3750  *
3751  * @param[in, out] dev
3752  *   Pointer to rte_eth_dev structure.
3753  * @param[in, out] tbl
3754  *   Pointer to flow table resource.
3755  * @param[in, out] dev_flow
3756  *   Pointer to the dev_flow.
3757  * @param[out] error
3758  *   Pointer to error structure.
3759  *
3760  * @return
3761  *   0 on success, a negative errno value otherwise and rte_errno is set.
3762  */
3763 static int
3764 flow_dv_jump_tbl_resource_register
3765                         (struct rte_eth_dev *dev __rte_unused,
3766                          struct mlx5_flow_tbl_resource *tbl,
3767                          struct mlx5_flow *dev_flow,
3768                          struct rte_flow_error *error __rte_unused)
3769 {
3770         struct mlx5_flow_tbl_data_entry *tbl_data =
3771                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3772
3773         MLX5_ASSERT(tbl);
3774         MLX5_ASSERT(tbl_data->jump.action);
3775         dev_flow->handle->rix_jump = tbl_data->idx;
3776         dev_flow->dv.jump = &tbl_data->jump;
3777         return 0;
3778 }
3779
3780 int
3781 flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
3782                          struct mlx5_list_entry *entry, void *cb_ctx)
3783 {
3784         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3785         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3786         struct mlx5_flow_dv_port_id_action_resource *res =
3787                                        container_of(entry, typeof(*res), entry);
3788
3789         return ref->port_id != res->port_id;
3790 }
3791
3792 struct mlx5_list_entry *
3793 flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
3794 {
3795         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3796         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3797         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3798         struct mlx5_flow_dv_port_id_action_resource *resource;
3799         uint32_t idx;
3800         int ret;
3801
3802         /* Register new port id action resource. */
3803         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3804         if (!resource) {
3805                 rte_flow_error_set(ctx->error, ENOMEM,
3806                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3807                                    "cannot allocate port_id action memory");
3808                 return NULL;
3809         }
3810         *resource = *ref;
3811         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3812                                                         ref->port_id,
3813                                                         &resource->action);
3814         if (ret) {
3815                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3816                 rte_flow_error_set(ctx->error, ENOMEM,
3817                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3818                                    "cannot create action");
3819                 return NULL;
3820         }
3821         resource->idx = idx;
3822         return &resource->entry;
3823 }
3824
3825 struct mlx5_list_entry *
3826 flow_dv_port_id_clone_cb(void *tool_ctx,
3827                          struct mlx5_list_entry *entry __rte_unused,
3828                          void *cb_ctx)
3829 {
3830         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3831         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3832         struct mlx5_flow_dv_port_id_action_resource *resource;
3833         uint32_t idx;
3834
3835         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3836         if (!resource) {
3837                 rte_flow_error_set(ctx->error, ENOMEM,
3838                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3839                                    "cannot allocate port_id action memory");
3840                 return NULL;
3841         }
3842         memcpy(resource, entry, sizeof(*resource));
3843         resource->idx = idx;
3844         return &resource->entry;
3845 }
3846
3847 void
3848 flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3849 {
3850         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3851         struct mlx5_flow_dv_port_id_action_resource *resource =
3852                                   container_of(entry, typeof(*resource), entry);
3853
3854         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3855 }
3856
3857 /**
3858  * Find existing table port ID resource or create and register a new one.
3859  *
3860  * @param[in, out] dev
3861  *   Pointer to rte_eth_dev structure.
3862  * @param[in, out] ref
3863  *   Pointer to port ID action resource reference.
3864  * @param[in, out] dev_flow
3865  *   Pointer to the dev_flow.
3866  * @param[out] error
3867  *   Pointer to error structure.
3868  *
3869  * @return
3870  *   0 on success, a negative errno value otherwise and rte_errno is set.
3871  */
3872 static int
3873 flow_dv_port_id_action_resource_register
3874                         (struct rte_eth_dev *dev,
3875                          struct mlx5_flow_dv_port_id_action_resource *ref,
3876                          struct mlx5_flow *dev_flow,
3877                          struct rte_flow_error *error)
3878 {
3879         struct mlx5_priv *priv = dev->data->dev_private;
3880         struct mlx5_list_entry *entry;
3881         struct mlx5_flow_dv_port_id_action_resource *resource;
3882         struct mlx5_flow_cb_ctx ctx = {
3883                 .error = error,
3884                 .data = ref,
3885         };
3886
3887         entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
3888         if (!entry)
3889                 return -rte_errno;
3890         resource = container_of(entry, typeof(*resource), entry);
3891         dev_flow->dv.port_id_action = resource;
3892         dev_flow->handle->rix_port_id_action = resource->idx;
3893         return 0;
3894 }
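/*
 * Editor's note: port_id (and push VLAN) actions are registered through a
 * plain mlx5_list rather than a hash list, presumably because the key
 * space is tiny. The usage pattern, with hypothetical names:
 *
 *	struct mlx5_flow_dv_port_id_action_resource ref = { .port_id = 1 };
 *	struct mlx5_flow_cb_ctx ctx = { .error = err, .data = &ref };
 *	entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
 */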
3895
3896 int
3897 flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
3898                            struct mlx5_list_entry *entry, void *cb_ctx)
3899 {
3900         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3901         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3902         struct mlx5_flow_dv_push_vlan_action_resource *res =
3903                                        container_of(entry, typeof(*res), entry);
3904
3905         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3906 }
3907
3908 struct mlx5_list_entry *
3909 flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
3910 {
3911         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3912         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3913         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3914         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3915         struct mlx5dv_dr_domain *domain;
3916         uint32_t idx;
3917         int ret;
3918
3919         /* Register new push VLAN action resource. */
3920         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3921         if (!resource) {
3922                 rte_flow_error_set(ctx->error, ENOMEM,
3923                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3924                                    "cannot allocate push_vlan action memory");
3925                 return NULL;
3926         }
3927         *resource = *ref;
3928         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3929                 domain = sh->fdb_domain;
3930         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3931                 domain = sh->rx_domain;
3932         else
3933                 domain = sh->tx_domain;
3934         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3935                                                         &resource->action);
3936         if (ret) {
3937                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3938                 rte_flow_error_set(ctx->error, ENOMEM,
3939                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3940                                    "cannot create push vlan action");
3941                 return NULL;
3942         }
3943         resource->idx = idx;
3944         return &resource->entry;
3945 }
3946
3947 struct mlx5_list_entry *
3948 flow_dv_push_vlan_clone_cb(void *tool_ctx,
3949                            struct mlx5_list_entry *entry __rte_unused,
3950                            void *cb_ctx)
3951 {
3952         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3953         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3954         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3955         uint32_t idx;
3956
3957         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3958         if (!resource) {
3959                 rte_flow_error_set(ctx->error, ENOMEM,
3960                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3961                                    "cannot allocate push_vlan action memory");
3962                 return NULL;
3963         }
3964         memcpy(resource, entry, sizeof(*resource));
3965         resource->idx = idx;
3966         return &resource->entry;
3967 }
3968
3969 void
3970 flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3971 {
3972         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3973         struct mlx5_flow_dv_push_vlan_action_resource *resource =
3974                                   container_of(entry, typeof(*resource), entry);
3975
3976         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
3977 }
3978
3979 /**
3980  * Find existing push vlan resource or create and register a new one.
3981  *
3982  * @param[in, out] dev
3983  *   Pointer to rte_eth_dev structure.
3984  * @param[in, out] ref
3985  *   Pointer to push VLAN action resource reference.
3986  * @param[in, out] dev_flow
3987  *   Pointer to the dev_flow.
3988  * @param[out] error
3989  *   Pointer to error structure.
3990  *
3991  * @return
3992  *   0 on success, a negative errno value otherwise and rte_errno is set.
3993  */
3994 static int
3995 flow_dv_push_vlan_action_resource_register
3996                        (struct rte_eth_dev *dev,
3997                         struct mlx5_flow_dv_push_vlan_action_resource *ref,
3998                         struct mlx5_flow *dev_flow,
3999                         struct rte_flow_error *error)
4000 {
4001         struct mlx5_priv *priv = dev->data->dev_private;
4002         struct mlx5_flow_dv_push_vlan_action_resource *resource;
4003         struct mlx5_list_entry *entry;
4004         struct mlx5_flow_cb_ctx ctx = {
4005                 .error = error,
4006                 .data = ref,
4007         };
4008
4009         entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
4010         if (!entry)
4011                 return -rte_errno;
4012         resource = container_of(entry, typeof(*resource), entry);
4013
4014         dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4015         dev_flow->dv.push_vlan_res = resource;
4016         return 0;
4017 }
4018
4019 /**
4020  * Get the header size of a specific rte_flow_item_type.
4021  *
4022  * @param[in] item_type
4023  *   Tested rte_flow_item_type.
4024  *
4025  * @return
4026  *   Size of the item type's header struct, 0 if void or irrelevant.
4027  */
4028 static size_t
4029 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4030 {
4031         size_t retval;
4032
4033         switch (item_type) {
4034         case RTE_FLOW_ITEM_TYPE_ETH:
4035                 retval = sizeof(struct rte_ether_hdr);
4036                 break;
4037         case RTE_FLOW_ITEM_TYPE_VLAN:
4038                 retval = sizeof(struct rte_vlan_hdr);
4039                 break;
4040         case RTE_FLOW_ITEM_TYPE_IPV4:
4041                 retval = sizeof(struct rte_ipv4_hdr);
4042                 break;
4043         case RTE_FLOW_ITEM_TYPE_IPV6:
4044                 retval = sizeof(struct rte_ipv6_hdr);
4045                 break;
4046         case RTE_FLOW_ITEM_TYPE_UDP:
4047                 retval = sizeof(struct rte_udp_hdr);
4048                 break;
4049         case RTE_FLOW_ITEM_TYPE_TCP:
4050                 retval = sizeof(struct rte_tcp_hdr);
4051                 break;
4052         case RTE_FLOW_ITEM_TYPE_VXLAN:
4053         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4054                 retval = sizeof(struct rte_vxlan_hdr);
4055                 break;
4056         case RTE_FLOW_ITEM_TYPE_GRE:
4057         case RTE_FLOW_ITEM_TYPE_NVGRE:
4058                 retval = sizeof(struct rte_gre_hdr);
4059                 break;
4060         case RTE_FLOW_ITEM_TYPE_MPLS:
4061                 retval = sizeof(struct rte_mpls_hdr);
4062                 break;
4063         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4064         default:
4065                 retval = 0;
4066                 break;
4067         }
4068         return retval;
4069 }
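/*
 * Editor's example: for the classic VXLAN encapsulation pattern
 * ETH / IPV4 / UDP / VXLAN the accumulated header length is
 * 14 + 20 + 8 + 8 = 50 bytes, well below MLX5_ENCAP_MAX_LEN.
 */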
4070
4071 #define MLX5_ENCAP_IPV4_VERSION         0x40
4072 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
4073 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
4074 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
4075 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
4076 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
4077 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
4078
4079 /**
4080  * Convert the encap action data from a list of rte_flow_item objects to a raw buffer.
4081  *
4082  * @param[in] items
4083  *   Pointer to rte_flow_item objects list.
4084  * @param[out] buf
4085  *   Pointer to the output buffer.
4086  * @param[out] size
4087  *   Pointer to the output buffer size.
4088  * @param[out] error
4089  *   Pointer to the error structure.
4090  *
4091  * @return
4092  *   0 on success, a negative errno value otherwise and rte_errno is set.
4093  */
4094 static int
4095 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4096                            size_t *size, struct rte_flow_error *error)
4097 {
4098         struct rte_ether_hdr *eth = NULL;
4099         struct rte_vlan_hdr *vlan = NULL;
4100         struct rte_ipv4_hdr *ipv4 = NULL;
4101         struct rte_ipv6_hdr *ipv6 = NULL;
4102         struct rte_udp_hdr *udp = NULL;
4103         struct rte_vxlan_hdr *vxlan = NULL;
4104         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4105         struct rte_gre_hdr *gre = NULL;
4106         size_t len;
4107         size_t temp_size = 0;
4108
4109         if (!items)
4110                 return rte_flow_error_set(error, EINVAL,
4111                                           RTE_FLOW_ERROR_TYPE_ACTION,
4112                                           NULL, "invalid empty data");
4113         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4114                 len = flow_dv_get_item_hdr_len(items->type);
4115                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4116                         return rte_flow_error_set(error, EINVAL,
4117                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4118                                                   (void *)items->type,
4119                                                   "items total size is too big"
4120                                                   " for encap action");
4121                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4122                 switch (items->type) {
4123                 case RTE_FLOW_ITEM_TYPE_ETH:
4124                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4125                         break;
4126                 case RTE_FLOW_ITEM_TYPE_VLAN:
4127                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4128                         if (!eth)
4129                                 return rte_flow_error_set(error, EINVAL,
4130                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4131                                                 (void *)items->type,
4132                                                 "eth header not found");
4133                         if (!eth->ether_type)
4134                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4135                         break;
4136                 case RTE_FLOW_ITEM_TYPE_IPV4:
4137                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4138                         if (!vlan && !eth)
4139                                 return rte_flow_error_set(error, EINVAL,
4140                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4141                                                 (void *)items->type,
4142                                                 "neither eth nor vlan"
4143                                                 " header found");
4144                         if (vlan && !vlan->eth_proto)
4145                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4146                         else if (eth && !eth->ether_type)
4147                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4148                         if (!ipv4->version_ihl)
4149                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4150                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4151                         if (!ipv4->time_to_live)
4152                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4153                         break;
4154                 case RTE_FLOW_ITEM_TYPE_IPV6:
4155                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4156                         if (!vlan && !eth)
4157                                 return rte_flow_error_set(error, EINVAL,
4158                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4159                                                 (void *)items->type,
4160                                                 "neither eth nor vlan"
4161                                                 " header found");
4162                         if (vlan && !vlan->eth_proto)
4163                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4164                         else if (eth && !eth->ether_type)
4165                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4166                         if (!ipv6->vtc_flow)
4167                                 ipv6->vtc_flow =
4168                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4169                         if (!ipv6->hop_limits)
4170                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4171                         break;
4172                 case RTE_FLOW_ITEM_TYPE_UDP:
4173                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4174                         if (!ipv4 && !ipv6)
4175                                 return rte_flow_error_set(error, EINVAL,
4176                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4177                                                 (void *)items->type,
4178                                                 "ip header not found");
4179                         if (ipv4 && !ipv4->next_proto_id)
4180                                 ipv4->next_proto_id = IPPROTO_UDP;
4181                         else if (ipv6 && !ipv6->proto)
4182                                 ipv6->proto = IPPROTO_UDP;
4183                         break;
4184                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4185                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4186                         if (!udp)
4187                                 return rte_flow_error_set(error, EINVAL,
4188                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4189                                                 (void *)items->type,
4190                                                 "udp header not found");
4191                         if (!udp->dst_port)
4192                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4193                         if (!vxlan->vx_flags)
4194                                 vxlan->vx_flags =
4195                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4196                         break;
4197                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4198                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4199                         if (!udp)
4200                                 return rte_flow_error_set(error, EINVAL,
4201                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4202                                                 (void *)items->type,
4203                                                 "udp header not found");
4204                         if (!vxlan_gpe->proto)
4205                                 return rte_flow_error_set(error, EINVAL,
4206                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4207                                                 (void *)items->type,
4208                                                 "next protocol not found");
4209                         if (!udp->dst_port)
4210                                 udp->dst_port =
4211                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4212                         if (!vxlan_gpe->vx_flags)
4213                                 vxlan_gpe->vx_flags =
4214                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4215                         break;
4216                 case RTE_FLOW_ITEM_TYPE_GRE:
4217                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4218                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4219                         if (!gre->proto)
4220                                 return rte_flow_error_set(error, EINVAL,
4221                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4222                                                 (void *)items->type,
4223                                                 "next protocol not found");
4224                         if (!ipv4 && !ipv6)
4225                                 return rte_flow_error_set(error, EINVAL,
4226                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4227                                                 (void *)items->type,
4228                                                 "ip header not found");
4229                         if (ipv4 && !ipv4->next_proto_id)
4230                                 ipv4->next_proto_id = IPPROTO_GRE;
4231                         else if (ipv6 && !ipv6->proto)
4232                                 ipv6->proto = IPPROTO_GRE;
4233                         break;
4234                 case RTE_FLOW_ITEM_TYPE_VOID:
4235                         break;
4236                 default:
4237                         return rte_flow_error_set(error, EINVAL,
4238                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4239                                                   (void *)items->type,
4240                                                   "unsupported item type");
4241                         break;
4242                 }
4243                 temp_size += len;
4244         }
4245         *size = temp_size;
4246         return 0;
4247 }
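/*
 * Editor's sketch: a minimal VXLAN encap definition the converter above
 * can consume; zeroed protocol fields are filled in with defaults
 * (ether_type, IP next protocol, UDP destination port 4789, VXLAN flags):
 *
 *	struct rte_flow_item items[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &eth },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &ipv4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &udp },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */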
4248
4249 static int
4250 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4251 {
4252         struct rte_ether_hdr *eth = NULL;
4253         struct rte_vlan_hdr *vlan = NULL;
4254         struct rte_ipv6_hdr *ipv6 = NULL;
4255         struct rte_udp_hdr *udp = NULL;
4256         char *next_hdr;
4257         uint16_t proto;
4258
4259         eth = (struct rte_ether_hdr *)data;
4260         next_hdr = (char *)(eth + 1);
4261         proto = RTE_BE16(eth->ether_type);
4262
4263         /* VLAN skipping */
4264         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4265                 vlan = (struct rte_vlan_hdr *)next_hdr;
4266                 proto = RTE_BE16(vlan->eth_proto);
4267                 next_hdr += sizeof(struct rte_vlan_hdr);
4268         }
4269
4270         /* HW calculates the IPv4 checksum, no need to proceed. */
4271         if (proto == RTE_ETHER_TYPE_IPV4)
4272                 return 0;
4273
4274         /* Non-IPv4/IPv6 header, not supported. */
4275         if (proto != RTE_ETHER_TYPE_IPV6) {
4276                 return rte_flow_error_set(error, ENOTSUP,
4277                                           RTE_FLOW_ERROR_TYPE_ACTION,
4278                                           NULL, "Cannot offload non IPv4/IPv6");
4279         }
4280
4281         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4282
4283         /* Ignore non-UDP. */
4284         if (ipv6->proto != IPPROTO_UDP)
4285                 return 0;
4286
4287         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4288         udp->dgram_cksum = 0;
4289
4290         return 0;
4291 }
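/*
 * Editor's note: only the IPv6/UDP case needs the explicit zeroing above.
 * The HW recomputes the IPv4 header checksum on its own, while a UDP
 * checksum over IPv6 cannot be recalculated at encap time; RFC 6935
 * explicitly permits a zero UDP checksum for tunnel traffic over IPv6.
 */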
4292
4293 /**
4294  * Convert L2 encap action to DV specification.
4295  *
4296  * @param[in] dev
4297  *   Pointer to rte_eth_dev structure.
4298  * @param[in] action
4299  *   Pointer to action structure.
4300  * @param[in, out] dev_flow
4301  *   Pointer to the mlx5_flow.
4302  * @param[in] transfer
4303  *   Mark if the flow is E-Switch flow.
4304  * @param[out] error
4305  *   Pointer to the error structure.
4306  *
4307  * @return
4308  *   0 on success, a negative errno value otherwise and rte_errno is set.
4309  */
4310 static int
4311 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4312                                const struct rte_flow_action *action,
4313                                struct mlx5_flow *dev_flow,
4314                                uint8_t transfer,
4315                                struct rte_flow_error *error)
4316 {
4317         const struct rte_flow_item *encap_data;
4318         const struct rte_flow_action_raw_encap *raw_encap_data;
4319         struct mlx5_flow_dv_encap_decap_resource res = {
4320                 .reformat_type =
4321                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4322                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4323                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4324         };
4325
4326         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4327                 raw_encap_data =
4328                         (const struct rte_flow_action_raw_encap *)action->conf;
4329                 res.size = raw_encap_data->size;
4330                 memcpy(res.buf, raw_encap_data->data, res.size);
4331         } else {
4332                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4333                         encap_data =
4334                                 ((const struct rte_flow_action_vxlan_encap *)
4335                                                 action->conf)->definition;
4336                 else
4337                         encap_data =
4338                                 ((const struct rte_flow_action_nvgre_encap *)
4339                                                 action->conf)->definition;
4340                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4341                                                &res.size, error))
4342                         return -rte_errno;
4343         }
4344         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4345                 return -rte_errno;
4346         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4347                 return rte_flow_error_set(error, EINVAL,
4348                                           RTE_FLOW_ERROR_TYPE_ACTION,
4349                                           NULL, "can't create L2 encap action");
4350         return 0;
4351 }
4352
4353 /**
4354  * Convert L2 decap action to DV specification.
4355  *
4356  * @param[in] dev
4357  *   Pointer to rte_eth_dev structure.
4358  * @param[in, out] dev_flow
4359  *   Pointer to the mlx5_flow.
4360  * @param[in] transfer
4361  *   Mark if the flow is E-Switch flow.
4362  * @param[out] error
4363  *   Pointer to the error structure.
4364  *
4365  * @return
4366  *   0 on success, a negative errno value otherwise and rte_errno is set.
4367  */
4368 static int
4369 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4370                                struct mlx5_flow *dev_flow,
4371                                uint8_t transfer,
4372                                struct rte_flow_error *error)
4373 {
4374         struct mlx5_flow_dv_encap_decap_resource res = {
4375                 .size = 0,
4376                 .reformat_type =
4377                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4378                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4379                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4380         };
4381
4382         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4383                 return rte_flow_error_set(error, EINVAL,
4384                                           RTE_FLOW_ERROR_TYPE_ACTION,
4385                                           NULL, "can't create L2 decap action");
4386         return 0;
4387 }
4388
4389 /**
4390  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4391  *
4392  * @param[in] dev
4393  *   Pointer to rte_eth_dev structure.
4394  * @param[in] action
4395  *   Pointer to action structure.
4396  * @param[in, out] dev_flow
4397  *   Pointer to the mlx5_flow.
4398  * @param[in] attr
4399  *   Pointer to the flow attributes.
4400  * @param[out] error
4401  *   Pointer to the error structure.
4402  *
4403  * @return
4404  *   0 on success, a negative errno value otherwise and rte_errno is set.
4405  */
4406 static int
4407 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4408                                 const struct rte_flow_action *action,
4409                                 struct mlx5_flow *dev_flow,
4410                                 const struct rte_flow_attr *attr,
4411                                 struct rte_flow_error *error)
4412 {
4413         const struct rte_flow_action_raw_encap *encap_data;
4414         struct mlx5_flow_dv_encap_decap_resource res;
4415
4416         memset(&res, 0, sizeof(res));
4417         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4418         res.size = encap_data->size;
4419         memcpy(res.buf, encap_data->data, res.size);
4420         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4421                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4422                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4423         if (attr->transfer)
4424                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4425         else
4426                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4427                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4428         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4429                 return rte_flow_error_set(error, EINVAL,
4430                                           RTE_FLOW_ERROR_TYPE_ACTION,
4431                                           NULL, "can't create encap action");
4432         return 0;
4433 }
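/*
 * Editor's sketch: the reformat type above is derived purely from the raw
 * buffer size relative to MLX5_ENCAPSULATION_DECISION_SIZE (hypothetical
 * configuration shown):
 *
 *	struct rte_flow_action_raw_encap conf = {
 *		.data = hdr_buf,	// prebuilt outer headers
 *		.size = hdr_len,
 *	};
 *	// size <  decision size -> L3_TUNNEL_TO_L2 (decap direction)
 *	// size >= decision size -> L2_TO_L3_TUNNEL (encap direction)
 */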
4434
4435 /**
4436  * Create action push VLAN.
4437  *
4438  * @param[in] dev
4439  *   Pointer to rte_eth_dev structure.
4440  * @param[in] attr
4441  *   Pointer to the flow attributes.
4442  * @param[in] vlan
4443  *   Pointer to the vlan to push to the Ethernet header.
4444  * @param[in, out] dev_flow
4445  *   Pointer to the mlx5_flow.
4446  * @param[out] error
4447  *   Pointer to the error structure.
4448  *
4449  * @return
4450  *   0 on success, a negative errno value otherwise and rte_errno is set.
4451  */
4452 static int
4453 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4454                                 const struct rte_flow_attr *attr,
4455                                 const struct rte_vlan_hdr *vlan,
4456                                 struct mlx5_flow *dev_flow,
4457                                 struct rte_flow_error *error)
4458 {
4459         struct mlx5_flow_dv_push_vlan_action_resource res;
4460
4461         memset(&res, 0, sizeof(res));
4462         res.vlan_tag =
4463                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4464                                  vlan->vlan_tci);
4465         if (attr->transfer)
4466                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4467         else
4468                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4469                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4470         return flow_dv_push_vlan_action_resource_register
4471                                             (dev, &res, dev_flow, error);
4472 }
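/*
 * Editor's example of the vlan_tag encoding above: the TPID occupies the
 * upper 16 bits and the TCI (PCP | DEI | VID) the lower 16 bits of one
 * big-endian word. For TPID 0x8100, PCP 3, VID 100:
 *
 *	uint16_t tci = (3 << 13) | 100;			// 0x6064
 *	uint32_t tag = rte_cpu_to_be_32((uint32_t)0x8100 << 16 | tci);
 */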
4473
4474 /**
4475  * Validate the modify-header actions.
4476  *
4477  * @param[in] action_flags
4478  *   Holds the actions detected until now.
4479  * @param[in] action
4480  *   Pointer to the modify action.
4481  * @param[out] error
4482  *   Pointer to error structure.
4483  *
4484  * @return
4485  *   0 on success, a negative errno value otherwise and rte_errno is set.
4486  */
4487 static int
4488 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4489                                    const struct rte_flow_action *action,
4490                                    struct rte_flow_error *error)
4491 {
4492         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4493                 return rte_flow_error_set(error, EINVAL,
4494                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4495                                           NULL, "action configuration not set");
4496         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4497                 return rte_flow_error_set(error, EINVAL,
4498                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4499                                           "can't have encap action before"
4500                                           " modify action");
4501         return 0;
4502 }
4503
4504 /**
4505  * Validate the modify-header MAC address actions.
4506  *
4507  * @param[in] action_flags
4508  *   Holds the actions detected until now.
4509  * @param[in] action
4510  *   Pointer to the modify action.
4511  * @param[in] item_flags
4512  *   Holds the items detected.
4513  * @param[out] error
4514  *   Pointer to error structure.
4515  *
4516  * @return
4517  *   0 on success, a negative errno value otherwise and rte_errno is set.
4518  */
4519 static int
4520 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4521                                    const struct rte_flow_action *action,
4522                                    const uint64_t item_flags,
4523                                    struct rte_flow_error *error)
4524 {
4525         int ret = 0;
4526
4527         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4528         if (!ret) {
4529                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4530                         return rte_flow_error_set(error, EINVAL,
4531                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4532                                                   NULL,
4533                                                   "no L2 item in pattern");
4534         }
4535         return ret;
4536 }
4537
4538 /**
4539  * Validate the modify-header IPv4 address actions.
4540  *
4541  * @param[in] action_flags
4542  *   Holds the actions detected until now.
4543  * @param[in] action
4544  *   Pointer to the modify action.
4545  * @param[in] item_flags
4546  *   Holds the items detected.
4547  * @param[out] error
4548  *   Pointer to error structure.
4549  *
4550  * @return
4551  *   0 on success, a negative errno value otherwise and rte_errno is set.
4552  */
4553 static int
4554 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4555                                     const struct rte_flow_action *action,
4556                                     const uint64_t item_flags,
4557                                     struct rte_flow_error *error)
4558 {
4559         int ret = 0;
4560         uint64_t layer;
4561
4562         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4563         if (!ret) {
4564                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4565                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4566                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4567                 if (!(item_flags & layer))
4568                         return rte_flow_error_set(error, EINVAL,
4569                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4570                                                   NULL,
4571                                                   "no ipv4 item in pattern");
4572         }
4573         return ret;
4574 }
4575
4576 /**
4577  * Validate the modify-header IPv6 address actions.
4578  *
4579  * @param[in] action_flags
4580  *   Holds the actions detected until now.
4581  * @param[in] action
4582  *   Pointer to the modify action.
4583  * @param[in] item_flags
4584  *   Holds the items detected.
4585  * @param[out] error
4586  *   Pointer to error structure.
4587  *
4588  * @return
4589  *   0 on success, a negative errno value otherwise and rte_errno is set.
4590  */
4591 static int
4592 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4593                                     const struct rte_flow_action *action,
4594                                     const uint64_t item_flags,
4595                                     struct rte_flow_error *error)
4596 {
4597         int ret = 0;
4598         uint64_t layer;
4599
4600         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4601         if (!ret) {
4602                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4603                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4604                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4605                 if (!(item_flags & layer))
4606                         return rte_flow_error_set(error, EINVAL,
4607                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4608                                                   NULL,
4609                                                   "no ipv6 item in pattern");
4610         }
4611         return ret;
4612 }
4613
4614 /**
4615  * Validate the modify-header TP actions.
4616  *
4617  * @param[in] action_flags
4618  *   Holds the actions detected until now.
4619  * @param[in] action
4620  *   Pointer to the modify action.
4621  * @param[in] item_flags
4622  *   Holds the items detected.
4623  * @param[out] error
4624  *   Pointer to error structure.
4625  *
4626  * @return
4627  *   0 on success, a negative errno value otherwise and rte_errno is set.
4628  */
4629 static int
4630 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4631                                   const struct rte_flow_action *action,
4632                                   const uint64_t item_flags,
4633                                   struct rte_flow_error *error)
4634 {
4635         int ret = 0;
4636         uint64_t layer;
4637
4638         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4639         if (!ret) {
4640                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4641                                  MLX5_FLOW_LAYER_INNER_L4 :
4642                                  MLX5_FLOW_LAYER_OUTER_L4;
4643                 if (!(item_flags & layer))
4644                         return rte_flow_error_set(error, EINVAL,
4645                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4646                                                   NULL, "no transport layer "
4647                                                   "in pattern");
4648         }
4649         return ret;
4650 }
4651
4652 /**
4653  * Validate the modify-header actions of increment/decrement
4654  * TCP Sequence-number.
4655  *
4656  * @param[in] action_flags
4657  *   Holds the actions detected until now.
4658  * @param[in] action
4659  *   Pointer to the modify action.
4660  * @param[in] item_flags
4661  *   Holds the items detected.
4662  * @param[out] error
4663  *   Pointer to error structure.
4664  *
4665  * @return
4666  *   0 on success, a negative errno value otherwise and rte_errno is set.
4667  */
4668 static int
4669 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4670                                        const struct rte_flow_action *action,
4671                                        const uint64_t item_flags,
4672                                        struct rte_flow_error *error)
4673 {
4674         int ret = 0;
4675         uint64_t layer;
4676
4677         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4678         if (!ret) {
4679                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4680                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4681                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4682                 if (!(item_flags & layer))
4683                         return rte_flow_error_set(error, EINVAL,
4684                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4685                                                   NULL, "no TCP item in"
4686                                                   " pattern");
4687                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4688                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4689                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4690                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4691                         return rte_flow_error_set(error, EINVAL,
4692                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4693                                                   NULL,
4694                                                   "cannot decrease and increase"
4695                                                   " TCP sequence number"
4696                                                   " at the same time");
4697         }
4698         return ret;
4699 }
4700
4701 /**
4702  * Validate the modify-header actions of increment/decrement
4703  * TCP Acknowledgment number.
4704  *
4705  * @param[in] action_flags
4706  *   Holds the actions detected until now.
4707  * @param[in] action
4708  *   Pointer to the modify action.
4709  * @param[in] item_flags
4710  *   Holds the items detected.
4711  * @param[out] error
4712  *   Pointer to error structure.
4713  *
4714  * @return
4715  *   0 on success, a negative errno value otherwise and rte_errno is set.
4716  */
4717 static int
4718 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4719                                        const struct rte_flow_action *action,
4720                                        const uint64_t item_flags,
4721                                        struct rte_flow_error *error)
4722 {
4723         int ret = 0;
4724         uint64_t layer;
4725
4726         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4727         if (!ret) {
4728                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4729                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4730                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4731                 if (!(item_flags & layer))
4732                         return rte_flow_error_set(error, EINVAL,
4733                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4734                                                   NULL, "no TCP item in"
4735                                                   " pattern");
4736                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4737                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4738                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4739                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4740                         return rte_flow_error_set(error, EINVAL,
4741                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4742                                                   NULL,
4743                                                   "cannot decrease and increase"
4744                                                   " TCP acknowledgment number"
4745                                                   " at the same time");
4746         }
4747         return ret;
4748 }
4749
4750 /**
4751  * Validate the modify-header TTL actions.
4752  *
4753  * @param[in] action_flags
4754  *   Holds the actions detected until now.
4755  * @param[in] action
4756  *   Pointer to the modify action.
4757  * @param[in] item_flags
4758  *   Holds the items detected.
4759  * @param[out] error
4760  *   Pointer to error structure.
4761  *
4762  * @return
4763  *   0 on success, a negative errno value otherwise and rte_errno is set.
4764  */
4765 static int
4766 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4767                                    const struct rte_flow_action *action,
4768                                    const uint64_t item_flags,
4769                                    struct rte_flow_error *error)
4770 {
4771         int ret = 0;
4772         uint64_t layer;
4773
4774         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4775         if (!ret) {
4776                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4777                                  MLX5_FLOW_LAYER_INNER_L3 :
4778                                  MLX5_FLOW_LAYER_OUTER_L3;
4779                 if (!(item_flags & layer))
4780                         return rte_flow_error_set(error, EINVAL,
4781                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4782                                                   NULL,
4783                                                   "no IP protocol in pattern");
4784         }
4785         return ret;
4786 }
4787
4788 /**
4789  * Validate the generic modify field actions.
4790  * @param[in] dev
4791  *   Pointer to the rte_eth_dev structure.
4792  * @param[in] action_flags
4793  *   Holds the actions detected until now.
4794  * @param[in] action
4795  *   Pointer to the modify action.
4796  * @param[in] attr
4797  *   Pointer to the flow attributes.
4798  * @param[out] error
4799  *   Pointer to error structure.
4800  *
4801  * @return
4802  *   Number of header fields to modify (0 or more) on success,
4803  *   a negative errno value otherwise and rte_errno is set.
4804  */
4805 static int
4806 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4807                                    const uint64_t action_flags,
4808                                    const struct rte_flow_action *action,
4809                                    const struct rte_flow_attr *attr,
4810                                    struct rte_flow_error *error)
4811 {
4812         int ret = 0;
4813         struct mlx5_priv *priv = dev->data->dev_private;
4814         struct mlx5_dev_config *config = &priv->config;
4815         const struct rte_flow_action_modify_field *action_modify_field =
4816                 action->conf;
4817         uint32_t dst_width = mlx5_flow_item_field_width(dev,
4818                                 action_modify_field->dst.field,
4819                                 -1, attr, error);
4820         uint32_t src_width = mlx5_flow_item_field_width(dev,
4821                                 action_modify_field->src.field,
4822                                 dst_width, attr, error);
4823
4824         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4825         if (ret)
4826                 return ret;
4827
4828         if (action_modify_field->width == 0)
4829                 return rte_flow_error_set(error, EINVAL,
4830                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4831                                 "no bits are requested to be modified");
4832         else if (action_modify_field->width > dst_width ||
4833                  action_modify_field->width > src_width)
4834                 return rte_flow_error_set(error, EINVAL,
4835                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4836                                 "cannot modify more bits than"
4837                                 " the width of a field");
4838         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4839             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4840                 if ((action_modify_field->dst.offset +
4841                      action_modify_field->width > dst_width) ||
4842                     (action_modify_field->dst.offset % 32))
4843                         return rte_flow_error_set(error, EINVAL,
4844                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4845                                         "destination offset is too big"
4846                                         " or not aligned to 4 bytes");
4847                 if (action_modify_field->dst.level &&
4848                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4849                         return rte_flow_error_set(error, ENOTSUP,
4850                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4851                                         "inner header fields modification"
4852                                         " is not supported");
4853         }
4854         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4855             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4856                 if (!attr->transfer && !attr->group)
4857                         return rte_flow_error_set(error, ENOTSUP,
4858                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4859                                         "modify field action is not"
4860                                         " supported for group 0");
4861                 if ((action_modify_field->src.offset +
4862                      action_modify_field->width > src_width) ||
4863                     (action_modify_field->src.offset % 32))
4864                         return rte_flow_error_set(error, EINVAL,
4865                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4866                                         "source offset is too big"
4867                                         " or not aligned to 4 bytes");
4868                 if (action_modify_field->src.level &&
4869                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4870                         return rte_flow_error_set(error, ENOTSUP,
4871                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4872                                         "inner header fields modification"
4873                                         " is not supported");
4874         }
4875         if ((action_modify_field->dst.field ==
4876              action_modify_field->src.field) &&
4877             (action_modify_field->dst.level ==
4878              action_modify_field->src.level))
4879                 return rte_flow_error_set(error, EINVAL,
4880                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4881                                 "source and destination fields"
4882                                 " cannot be the same");
4883         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4884             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER ||
4885             action_modify_field->dst.field == RTE_FLOW_FIELD_MARK)
4886                 return rte_flow_error_set(error, EINVAL,
4887                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4888                                 "mark, immediate value or a pointer to it"
4889                                 " cannot be used as a destination");
4890         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4891             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4892                 return rte_flow_error_set(error, ENOTSUP,
4893                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4894                                 "modification of an arbitrary"
4895                                 " place in a packet is not supported");
4896         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4897             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4898                 return rte_flow_error_set(error, ENOTSUP,
4899                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4900                                 "modification of the 802.1Q Tag"
4901                                 " Identifier is not supported");
4902         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4903             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4904                 return rte_flow_error_set(error, ENOTSUP,
4905                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4906                                 "modification of the VXLAN Network"
4907                                 " Identifier is not supported");
4908         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4909             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4910                 return rte_flow_error_set(error, ENOTSUP,
4911                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4912                                 "modification of the GENEVE Network"
4913                                 " Identifier is not supported");
4914         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4915             action_modify_field->src.field == RTE_FLOW_FIELD_MARK)
4916                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4917                     !mlx5_flow_ext_mreg_supported(dev))
4918                         return rte_flow_error_set(error, ENOTSUP,
4919                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4920                                         "cannot modify mark in legacy mode"
4921                                         " or without extensive registers");
4922         if (action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4923             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4924                 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
4925                     !mlx5_flow_ext_mreg_supported(dev))
4926                         return rte_flow_error_set(error, ENOTSUP,
4927                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4928                                         "cannot modify meta without"
4929                                         " extensive registers support");
4930                 ret = flow_dv_get_metadata_reg(dev, attr, error);
4931                 if (ret < 0 || ret == REG_NON)
4932                         return rte_flow_error_set(error, ENOTSUP,
4933                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4934                                         "cannot modify meta without"
4935                                         " extensive registers available");
4936         }
4937         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4938                 return rte_flow_error_set(error, ENOTSUP,
4939                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4940                                 "add and sub operations"
4941                                 " are not supported");
4942         return (action_modify_field->width / 32) +
4943                !!(action_modify_field->width % 32);
4944 }
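
/*
 * Illustration (not from the driver source): the return value above is
 * the number of 32-bit modify-header commands the action will consume.
 * A worked example, assuming a 48-bit field width:
 *
 *     48 / 32 = 1, !!(48 % 32) = 1  =>  1 + 1 = 2 commands
 *
 * while an aligned width such as 64 needs exactly 64 / 32 = 2 commands.
 */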
4945
4946 /**
4947  * Validate jump action.
4948  *
4949  * @param[in] action
4950  *   Pointer to the jump action.
4951  * @param[in] action_flags
4952  *   Holds the actions detected until now.
4953  * @param[in] attributes
4954  *   Pointer to flow attributes
4955  * @param[in] external
4956  *   Action belongs to a flow rule created by a request external to the PMD.
4957  * @param[out] error
4958  *   Pointer to error structure.
4959  *
4960  * @return
4961  *   0 on success, a negative errno value otherwise and rte_errno is set.
4962  */
4963 static int
4964 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4965                              const struct mlx5_flow_tunnel *tunnel,
4966                              const struct rte_flow_action *action,
4967                              uint64_t action_flags,
4968                              const struct rte_flow_attr *attributes,
4969                              bool external, struct rte_flow_error *error)
4970 {
4971         uint32_t target_group, table;
4972         int ret = 0;
4973         struct flow_grp_info grp_info = {
4974                 .external = !!external,
4975                 .transfer = !!attributes->transfer,
4976                 .fdb_def_rule = 1,
4977                 .std_tbl_fix = 0
4978         };
4979         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4980                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4981                 return rte_flow_error_set(error, EINVAL,
4982                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4983                                           "can't have 2 fate actions in"
4984                                           " the same flow");
4985         if (!action->conf)
4986                 return rte_flow_error_set(error, EINVAL,
4987                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4988                                           NULL, "action configuration not set");
4989         target_group =
4990                 ((const struct rte_flow_action_jump *)action->conf)->group;
4991         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4992                                        &grp_info, error);
4993         if (ret)
4994                 return ret;
4995         if (attributes->group == target_group &&
4996             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4997                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4998                 return rte_flow_error_set(error, EINVAL,
4999                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5000                                           "target group must be other than"
5001                                           " the current flow group");
5002         return 0;
5003 }
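
/*
 * Illustration (hypothetical rule, not part of the driver): a flow in
 * group 1 jumping to group 3 passes the check above, while a jump from
 * group 1 back to group 1 is rejected unless a tunnel set/match action
 * is present:
 *
 *     struct rte_flow_action_jump jump = { .group = 3 };
 *     struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_JUMP,
 *             .conf = &jump,
 *     };
 */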
5004
5005 /**
5006  * Validate action PORT_ID / REPRESENTED_PORT.
5007  *
5008  * @param[in] dev
5009  *   Pointer to rte_eth_dev structure.
5010  * @param[in] action_flags
5011  *   Bit-fields that holds the actions detected until now.
5012  * @param[in] action
5013  *   PORT_ID / REPRESENTED_PORT action structure.
5014  * @param[in] attr
5015  *   Attributes of flow that includes this action.
5016  * @param[out] error
5017  *   Pointer to error structure.
5018  *
5019  * @return
5020  *   0 on success, a negative errno value otherwise and rte_errno is set.
5021  */
5022 static int
5023 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5024                                 uint64_t action_flags,
5025                                 const struct rte_flow_action *action,
5026                                 const struct rte_flow_attr *attr,
5027                                 struct rte_flow_error *error)
5028 {
5029         const struct rte_flow_action_port_id *port_id;
5030         const struct rte_flow_action_ethdev *ethdev;
5031         struct mlx5_priv *act_priv;
5032         struct mlx5_priv *dev_priv;
5033         uint16_t port;
5034
5035         if (!attr->transfer)
5036                 return rte_flow_error_set(error, ENOTSUP,
5037                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5038                                           NULL,
5039                                           "port action is valid in transfer"
5040                                           " mode only");
5041         if (!action || !action->conf)
5042                 return rte_flow_error_set(error, ENOTSUP,
5043                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5044                                           NULL,
5045                                           "port action parameters must be"
5046                                           " specified");
5047         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5048                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5049                 return rte_flow_error_set(error, EINVAL,
5050                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5051                                           "can have only one fate action in"
5052                                           " a flow");
5053         dev_priv = mlx5_dev_to_eswitch_info(dev);
5054         if (!dev_priv)
5055                 return rte_flow_error_set(error, rte_errno,
5056                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5057                                           NULL,
5058                                           "failed to obtain E-Switch info");
5059         switch (action->type) {
5060         case RTE_FLOW_ACTION_TYPE_PORT_ID:
5061                 port_id = action->conf;
5062                 port = port_id->original ? dev->data->port_id : port_id->id;
5063                 break;
5064         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5065                 ethdev = action->conf;
5066                 port = ethdev->port_id;
5067                 break;
5068         default:
5069                 MLX5_ASSERT(false);
5070                 return rte_flow_error_set
5071                                 (error, EINVAL,
5072                                  RTE_FLOW_ERROR_TYPE_ACTION, action,
5073                                  "unknown E-Switch action");
5074         }
5075         act_priv = mlx5_port_to_eswitch_info(port, false);
5076         if (!act_priv)
5077                 return rte_flow_error_set
5078                                 (error, rte_errno,
5079                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, action->conf,
5080                                  "failed to obtain E-Switch port id for port");
5081         if (act_priv->domain_id != dev_priv->domain_id)
5082                 return rte_flow_error_set
5083                                 (error, EINVAL,
5084                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5085                                  "port does not belong to"
5086                                  " E-Switch being configured");
5087         return 0;
5088 }
5089
5090 /**
5091  * Get the maximum number of modify header actions.
5092  *
5093  * @param dev
5094  *   Pointer to rte_eth_dev structure.
5095  * @param root
5096  *   Whether action is on root table.
5097  *
5098  * @return
5099  *   Max number of modify header actions device can support.
5100  */
5101 static inline unsigned int
5102 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5103                               bool root)
5104 {
5105         /*
5106          * There's no way to directly query the max capacity from FW.
5107          * The maximal value on the root table should be assumed to be supported.
5108          */
5109         if (!root)
5110                 return MLX5_MAX_MODIFY_NUM;
5111         else
5112                 return MLX5_ROOT_TBL_MODIFY_NUM;
5113 }
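
/*
 * Usage sketch (illustrative, error arguments elided): callers compare
 * the accumulated number of modify-header commands against this cap
 * before registering the resource, mirroring the check in
 * flow_dv_modify_hdr_resource_register() below:
 *
 *     if (resource->actions_num >
 *         flow_dv_modify_hdr_action_max(dev, resource->root))
 *             return rte_flow_error_set(error, EOVERFLOW, ...);
 */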
5114
5115 /**
5116  * Validate the meter action.
5117  *
5118  * @param[in] dev
5119  *   Pointer to rte_eth_dev structure.
5120  * @param[in] action_flags
5121  *   Bit-fields that holds the actions detected until now.
5122  * @param[in] action
5123  *   Pointer to the meter action.
5124  * @param[in] attr
5125  *   Attributes of flow that includes this action.
5126  * @param[in] port_id_item
5127  *   Pointer to item indicating port id.
5128  * @param[out] error
5129  *   Pointer to error structure.
5130  *
5131  * @return
5132  *   0 on success, a negative errno value otherwise and rte_errno is set.
5133  */
5134 static int
5135 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5136                                 uint64_t action_flags,
5137                                 const struct rte_flow_action *action,
5138                                 const struct rte_flow_attr *attr,
5139                                 const struct rte_flow_item *port_id_item,
5140                                 bool *def_policy,
5141                                 struct rte_flow_error *error)
5142 {
5143         struct mlx5_priv *priv = dev->data->dev_private;
5144         const struct rte_flow_action_meter *am = action->conf;
5145         struct mlx5_flow_meter_info *fm;
5146         struct mlx5_flow_meter_policy *mtr_policy;
5147         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5148
5149         if (!am)
5150                 return rte_flow_error_set(error, EINVAL,
5151                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5152                                           "meter action conf is NULL");
5153
5154         if (action_flags & MLX5_FLOW_ACTION_METER)
5155                 return rte_flow_error_set(error, ENOTSUP,
5156                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5157                                           "meter chaining not supported");
5158         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5159                 return rte_flow_error_set(error, ENOTSUP,
5160                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5161                                           "meter with jump not supported");
5162         if (!priv->mtr_en)
5163                 return rte_flow_error_set(error, ENOTSUP,
5164                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5165                                           NULL,
5166                                           "meter action not supported");
5167         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5168         if (!fm)
5169                 return rte_flow_error_set(error, EINVAL,
5170                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5171                                           "Meter not found");
5172         /* ASO meter can always be shared by different domains. */
5173         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5174             !(fm->transfer == attr->transfer ||
5175               (!fm->ingress && !attr->ingress && attr->egress) ||
5176               (!fm->egress && !attr->egress && attr->ingress)))
5177                 return rte_flow_error_set(error, EINVAL,
5178                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5179                         "Flow attributes are either invalid "
5180                         "or have a domain conflict with the current "
5181                         "meter attributes");
5182         if (fm->def_policy) {
5183                 if (!((attr->transfer &&
5184                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5185                         (attr->egress &&
5186                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5187                         (attr->ingress &&
5188                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5189                         return rte_flow_error_set(error, EINVAL,
5190                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5191                                           "Flow attributes domain "
5192                                           "has a conflict with the current "
5193                                           "meter domain attributes");
5194                 *def_policy = true;
5195         } else {
5196                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5197                                                 fm->policy_id, NULL);
5198                 if (!mtr_policy)
5199                         return rte_flow_error_set(error, EINVAL,
5200                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5201                                           "Invalid policy ID for meter");
5202                 if (!((attr->transfer && mtr_policy->transfer) ||
5203                         (attr->egress && mtr_policy->egress) ||
5204                         (attr->ingress && mtr_policy->ingress)))
5205                         return rte_flow_error_set(error, EINVAL,
5206                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5207                                           "Flow attributes domain "
5208                                           "has a conflict with the current "
5209                                           "meter domain attributes");
5210                 if (attr->transfer && mtr_policy->dev) {
5211                         /*
5212                          * When the policy has a port_id fate action, the
5213                          * flow should have the same src port as the policy.
5214                          */
5215                         struct mlx5_priv *policy_port_priv =
5216                                         mtr_policy->dev->data->dev_private;
5217                         int32_t flow_src_port = priv->representor_id;
5218
5219                         if (port_id_item) {
5220                                 const struct rte_flow_item_port_id *spec =
5221                                                         port_id_item->spec;
5222                                 struct mlx5_priv *port_priv =
5223                                         mlx5_port_to_eswitch_info(spec->id,
5224                                                                   false);
5225                                 if (!port_priv)
5226                                         return rte_flow_error_set(error,
5227                                                 rte_errno,
5228                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5229                                                 spec,
5230                                                 "Failed to get port info.");
5231                                 flow_src_port = port_priv->representor_id;
5232                         }
5233                         if (flow_src_port != policy_port_priv->representor_id)
5234                                 return rte_flow_error_set(error,
5235                                                 rte_errno,
5236                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5237                                                 NULL,
5238                                                 "Flow and meter policy "
5239                                                 "have different src ports.");
5240                 }
5241                 *def_policy = false;
5242         }
5243         return 0;
5244 }
5245
5246 /**
5247  * Validate the age action.
5248  *
5249  * @param[in] action_flags
5250  *   Holds the actions detected until now.
5251  * @param[in] action
5252  *   Pointer to the age action.
5253  * @param[in] dev
5254  *   Pointer to the Ethernet device structure.
5255  * @param[out] error
5256  *   Pointer to error structure.
5257  *
5258  * @return
5259  *   0 on success, a negative errno value otherwise and rte_errno is set.
5260  */
5261 static int
5262 flow_dv_validate_action_age(uint64_t action_flags,
5263                             const struct rte_flow_action *action,
5264                             struct rte_eth_dev *dev,
5265                             struct rte_flow_error *error)
5266 {
5267         struct mlx5_priv *priv = dev->data->dev_private;
5268         const struct rte_flow_action_age *age = action->conf;
5269
5270         if (!priv->sh->devx || (priv->sh->cmng.counter_fallback &&
5271             !priv->sh->aso_age_mng))
5272                 return rte_flow_error_set(error, ENOTSUP,
5273                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5274                                           NULL,
5275                                           "age action not supported");
5276         if (!(action->conf))
5277                 return rte_flow_error_set(error, EINVAL,
5278                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5279                                           "configuration cannot be null");
5280         if (!(age->timeout))
5281                 return rte_flow_error_set(error, EINVAL,
5282                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5283                                           "invalid timeout value 0");
5284         if (action_flags & MLX5_FLOW_ACTION_AGE)
5285                 return rte_flow_error_set(error, EINVAL,
5286                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5287                                           "duplicate age actions set");
5288         return 0;
5289 }
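
/*
 * Illustration (hypothetical values, assuming DevX counter support is
 * available): a minimal AGE action configuration that passes the checks
 * above:
 *
 *     struct rte_flow_action_age age = { .timeout = 10, };
 *     struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_AGE,
 *             .conf = &age,
 *     };
 *
 * A zero timeout or a second AGE action in the same flow is rejected.
 */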
5290
5291 /**
5292  * Validate the modify-header IPv4 DSCP actions.
5293  *
5294  * @param[in] action_flags
5295  *   Holds the actions detected until now.
5296  * @param[in] action
5297  *   Pointer to the modify action.
5298  * @param[in] item_flags
5299  *   Holds the items detected.
5300  * @param[out] error
5301  *   Pointer to error structure.
5302  *
5303  * @return
5304  *   0 on success, a negative errno value otherwise and rte_errno is set.
5305  */
5306 static int
5307 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5308                                          const struct rte_flow_action *action,
5309                                          const uint64_t item_flags,
5310                                          struct rte_flow_error *error)
5311 {
5312         int ret = 0;
5313
5314         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5315         if (!ret) {
5316                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5317                         return rte_flow_error_set(error, EINVAL,
5318                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5319                                                   NULL,
5320                                                   "no ipv4 item in pattern");
5321         }
5322         return ret;
5323 }
5324
5325 /**
5326  * Validate the modify-header IPv6 DSCP actions.
5327  *
5328  * @param[in] action_flags
5329  *   Holds the actions detected until now.
5330  * @param[in] action
5331  *   Pointer to the modify action.
5332  * @param[in] item_flags
5333  *   Holds the items detected.
5334  * @param[out] error
5335  *   Pointer to error structure.
5336  *
5337  * @return
5338  *   0 on success, a negative errno value otherwise and rte_errno is set.
5339  */
5340 static int
5341 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5342                                          const struct rte_flow_action *action,
5343                                          const uint64_t item_flags,
5344                                          struct rte_flow_error *error)
5345 {
5346         int ret = 0;
5347
5348         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5349         if (!ret) {
5350                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5351                         return rte_flow_error_set(error, EINVAL,
5352                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5353                                                   NULL,
5354                                                   "no ipv6 item in pattern");
5355         }
5356         return ret;
5357 }
5358
5359 int
5360 flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
5361                         struct mlx5_list_entry *entry, void *cb_ctx)
5362 {
5363         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5364         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5365         struct mlx5_flow_dv_modify_hdr_resource *resource =
5366                                   container_of(entry, typeof(*resource), entry);
5367         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5368
5369         key_len += ref->actions_num * sizeof(ref->actions[0]);
5370         return ref->actions_num != resource->actions_num ||
5371                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5372 }
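
/*
 * Key layout note (illustrative): the comparison key intentionally skips
 * the list entry and bookkeeping fields at the head of
 * mlx5_flow_dv_modify_hdr_resource and covers everything from ft_type up
 * to the end of the variable-length actions array:
 *
 *     key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type)
 *             + ref->actions_num * sizeof(ref->actions[0]);
 *
 * so two resources match only when the table type, the fixed key fields
 * and every modification command are byte-identical.
 */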
5373
5374 static struct mlx5_indexed_pool *
5375 flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
5376 {
5377         struct mlx5_indexed_pool *ipool = __atomic_load_n
5378                                      (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
5379
5380         if (!ipool) {
5381                 struct mlx5_indexed_pool *expected = NULL;
5382                 struct mlx5_indexed_pool_config cfg =
5383                     (struct mlx5_indexed_pool_config) {
5384                        .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
5385                                                                    (index + 1) *
5386                                            sizeof(struct mlx5_modification_cmd),
5387                        .trunk_size = 64,
5388                        .grow_trunk = 3,
5389                        .grow_shift = 2,
5390                        .need_lock = 1,
5391                        .release_mem_en = !!sh->reclaim_mode,
5392                        .per_core_cache = sh->reclaim_mode ? 0 : (1 << 16),
5393                        .malloc = mlx5_malloc,
5394                        .free = mlx5_free,
5395                        .type = "mlx5_modify_action_resource",
5396                 };
5397
5398                 cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
5399                 ipool = mlx5_ipool_create(&cfg);
5400                 if (!ipool)
5401                         return NULL;
5402                 if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
5403                                                  &expected, ipool, false,
5404                                                  __ATOMIC_SEQ_CST,
5405                                                  __ATOMIC_SEQ_CST)) {
5406                         mlx5_ipool_destroy(ipool);
5407                         ipool = __atomic_load_n(&sh->mdh_ipools[index],
5408                                                 __ATOMIC_SEQ_CST);
5409                 }
5410         }
5411         return ipool;
5412 }
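
/*
 * A minimal sketch of the lock-free lazy-initialization pattern used
 * above, with hypothetical names (slot, create, destroy):
 *
 *     obj = __atomic_load_n(&slot, __ATOMIC_SEQ_CST);
 *     if (!obj) {
 *             void *expected = NULL;
 *
 *             obj = create();
 *             if (!__atomic_compare_exchange_n(&slot, &expected, obj,
 *                                              false, __ATOMIC_SEQ_CST,
 *                                              __ATOMIC_SEQ_CST)) {
 *                     destroy(obj);   // lost the race to another thread
 *                     obj = __atomic_load_n(&slot, __ATOMIC_SEQ_CST);
 *             }
 *     }
 *
 * The loser of a concurrent race destroys its own pool and adopts the
 * winner's, so at most one ipool per actions_num index survives.
 */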
5413
5414 struct mlx5_list_entry *
5415 flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
5416 {
5417         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5418         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5419         struct mlx5dv_dr_domain *ns;
5420         struct mlx5_flow_dv_modify_hdr_resource *entry;
5421         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5422         struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
5423                                                           ref->actions_num - 1);
5424         int ret;
5425         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5426         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5427         uint32_t idx;
5428
5429         if (unlikely(!ipool)) {
5430                 rte_flow_error_set(ctx->error, ENOMEM,
5431                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5432                                    NULL, "cannot allocate modify ipool");
5433                 return NULL;
5434         }
5435         entry = mlx5_ipool_zmalloc(ipool, &idx);
5436         if (!entry) {
5437                 rte_flow_error_set(ctx->error, ENOMEM,
5438                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5439                                    "cannot allocate resource memory");
5440                 return NULL;
5441         }
5442         rte_memcpy(&entry->ft_type,
5443                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5444                    key_len + data_len);
5445         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5446                 ns = sh->fdb_domain;
5447         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5448                 ns = sh->tx_domain;
5449         else
5450                 ns = sh->rx_domain;
5451         ret = mlx5_flow_os_create_flow_action_modify_header
5452                                         (sh->cdev->ctx, ns, entry,
5453                                          data_len, &entry->action);
5454         if (ret) {
5455                 mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
5456                 rte_flow_error_set(ctx->error, ENOMEM,
5457                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5458                                    NULL, "cannot create modification action");
5459                 return NULL;
5460         }
5461         entry->idx = idx;
5462         return &entry->entry;
5463 }
5464
5465 struct mlx5_list_entry *
5466 flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5467                         void *cb_ctx)
5468 {
5469         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5470         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5471         struct mlx5_flow_dv_modify_hdr_resource *entry;
5472         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5473         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5474         uint32_t idx;
5475
5476         entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
5477                                   &idx);
5478         if (!entry) {
5479                 rte_flow_error_set(ctx->error, ENOMEM,
5480                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5481                                    "cannot allocate resource memory");
5482                 return NULL;
5483         }
5484         memcpy(entry, oentry, sizeof(*entry) + data_len);
5485         entry->idx = idx;
5486         return &entry->entry;
5487 }
5488
5489 void
5490 flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5491 {
5492         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5493         struct mlx5_flow_dv_modify_hdr_resource *res =
5494                 container_of(entry, typeof(*res), entry);
5495
5496         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
5497 }
5498
5499 /**
5500  * Validate the sample action.
5501  *
5502  * @param[in, out] action_flags
5503  *   Holds the actions detected until now.
5504  * @param[in] action
5505  *   Pointer to the sample action.
5506  * @param[in] dev
5507  *   Pointer to the Ethernet device structure.
5508  * @param[in] attr
5509  *   Attributes of flow that includes this action.
5510  * @param[in] item_flags
5511  *   Holds the items detected.
5512  * @param[in] rss
5513  *   Pointer to the RSS action.
5514  * @param[out] sample_rss
5515  *   Pointer to the RSS action in sample action list.
5516  * @param[out] count
5517  *   Pointer to the COUNT action in sample action list.
5518  * @param[out] fdb_mirror_limit
5519  *   Pointer to the FDB mirror limitation flag.
5520  * @param[out] error
5521  *   Pointer to error structure.
5522  *
5523  * @return
5524  *   0 on success, a negative errno value otherwise and rte_errno is set.
5525  */
5526 static int
5527 flow_dv_validate_action_sample(uint64_t *action_flags,
5528                                const struct rte_flow_action *action,
5529                                struct rte_eth_dev *dev,
5530                                const struct rte_flow_attr *attr,
5531                                uint64_t item_flags,
5532                                const struct rte_flow_action_rss *rss,
5533                                const struct rte_flow_action_rss **sample_rss,
5534                                const struct rte_flow_action_count **count,
5535                                int *fdb_mirror_limit,
5536                                struct rte_flow_error *error)
5537 {
5538         struct mlx5_priv *priv = dev->data->dev_private;
5539         struct mlx5_dev_config *dev_conf = &priv->config;
5540         const struct rte_flow_action_sample *sample = action->conf;
5541         const struct rte_flow_action *act;
5542         uint64_t sub_action_flags = 0;
5543         uint16_t queue_index = 0xFFFF;
5544         int actions_n = 0;
5545         int ret;
5546
5547         if (!sample)
5548                 return rte_flow_error_set(error, EINVAL,
5549                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5550                                           "configuration cannot be NULL");
5551         if (sample->ratio == 0)
5552                 return rte_flow_error_set(error, EINVAL,
5553                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5554                                           "ratio value starts from 1");
5555         if (!priv->sh->devx || (sample->ratio > 0 && !priv->sampler_en))
5556                 return rte_flow_error_set(error, ENOTSUP,
5557                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5558                                           NULL,
5559                                           "sample action not supported");
5560         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5561                 return rte_flow_error_set(error, EINVAL,
5562                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5563                                           "Multiple sample actions not "
5564                                           "supported");
5565         if (*action_flags & MLX5_FLOW_ACTION_METER)
5566                 return rte_flow_error_set(error, EINVAL,
5567                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5568                                           "wrong action order, meter should "
5569                                           "be after sample action");
5570         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5571                 return rte_flow_error_set(error, EINVAL,
5572                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5573                                           "wrong action order, jump should "
5574                                           "be after sample action");
5575         if (*action_flags & MLX5_FLOW_ACTION_CT)
5576                 return rte_flow_error_set(error, EINVAL,
5577                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5578                                           "Sample after CT not supported");
5579         act = sample->actions;
5580         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5581                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5582                         return rte_flow_error_set(error, ENOTSUP,
5583                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5584                                                   act, "too many actions");
5585                 switch (act->type) {
5586                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5587                         ret = mlx5_flow_validate_action_queue(act,
5588                                                               sub_action_flags,
5589                                                               dev,
5590                                                               attr, error);
5591                         if (ret < 0)
5592                                 return ret;
5593                         queue_index = ((const struct rte_flow_action_queue *)
5594                                                         (act->conf))->index;
5595                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5596                         ++actions_n;
5597                         break;
5598                 case RTE_FLOW_ACTION_TYPE_RSS:
5599                         *sample_rss = act->conf;
5600                         ret = mlx5_flow_validate_action_rss(act,
5601                                                             sub_action_flags,
5602                                                             dev, attr,
5603                                                             item_flags,
5604                                                             error);
5605                         if (ret < 0)
5606                                 return ret;
5607                         if (rss && *sample_rss &&
5608                             ((*sample_rss)->level != rss->level ||
5609                             (*sample_rss)->types != rss->types))
5610                                 return rte_flow_error_set(error, ENOTSUP,
5611                                         RTE_FLOW_ERROR_TYPE_ACTION,
5612                                         NULL,
5613                                         "Can't use different RSS types "
5614                                         "or levels in the same flow");
5615                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5616                                 queue_index = (*sample_rss)->queue[0];
5617                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5618                         ++actions_n;
5619                         break;
5620                 case RTE_FLOW_ACTION_TYPE_MARK:
5621                         ret = flow_dv_validate_action_mark(dev, act,
5622                                                            sub_action_flags,
5623                                                            attr, error);
5624                         if (ret < 0)
5625                                 return ret;
5626                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5627                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5628                                                 MLX5_FLOW_ACTION_MARK_EXT;
5629                         else
5630                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5631                         ++actions_n;
5632                         break;
5633                 case RTE_FLOW_ACTION_TYPE_COUNT:
5634                         ret = flow_dv_validate_action_count
5635                                 (dev, false, *action_flags | sub_action_flags,
5636                                  error);
5637                         if (ret < 0)
5638                                 return ret;
5639                         *count = act->conf;
5640                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5641                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5642                         ++actions_n;
5643                         break;
5644                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5645                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5646                         ret = flow_dv_validate_action_port_id(dev,
5647                                                               sub_action_flags,
5648                                                               act,
5649                                                               attr,
5650                                                               error);
5651                         if (ret)
5652                                 return ret;
5653                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5654                         ++actions_n;
5655                         break;
5656                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5657                         ret = flow_dv_validate_action_raw_encap_decap
5658                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5659                                  &actions_n, action, item_flags, error);
5660                         if (ret < 0)
5661                                 return ret;
5662                         ++actions_n;
5663                         break;
5664                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5665                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5666                         ret = flow_dv_validate_action_l2_encap(dev,
5667                                                                sub_action_flags,
5668                                                                act, attr,
5669                                                                error);
5670                         if (ret < 0)
5671                                 return ret;
5672                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5673                         ++actions_n;
5674                         break;
5675                 default:
5676                         return rte_flow_error_set(error, ENOTSUP,
5677                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5678                                                   NULL,
5679                                                   "optional action not "
5680                                                   "supported");
5681                 }
5682         }
5683         if (attr->ingress && !attr->transfer) {
5684                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5685                                           MLX5_FLOW_ACTION_RSS)))
5686                         return rte_flow_error_set(error, EINVAL,
5687                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5688                                                   NULL,
5689                                                   "Ingress must have a dest "
5690                                                   "QUEUE for Sample");
5691         } else if (attr->egress && !attr->transfer) {
5692                 return rte_flow_error_set(error, ENOTSUP,
5693                                           RTE_FLOW_ERROR_TYPE_ACTION,
5694                                           NULL,
5695                                           "Sample only supports Ingress "
5696                                           "or E-Switch");
5697         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5698                 MLX5_ASSERT(attr->transfer);
5699                 if (sample->ratio > 1)
5700                         return rte_flow_error_set(error, ENOTSUP,
5701                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5702                                                   NULL,
5703                                                   "E-Switch doesn't support "
5704                                                   "any optional action "
5705                                                   "for sampling");
5706                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5707                         return rte_flow_error_set(error, ENOTSUP,
5708                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5709                                                   NULL,
5710                                                   "unsupported action QUEUE");
5711                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5712                         return rte_flow_error_set(error, ENOTSUP,
5713                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5714                                                   NULL,
5715                                                   "unsupported action RSS");
5716                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5717                         return rte_flow_error_set(error, EINVAL,
5718                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5719                                                   NULL,
5720                                                   "E-Switch must have a dest "
5721                                                   "port for mirroring");
5722                 if (!priv->config.hca_attr.reg_c_preserve &&
5723                      priv->representor_id != UINT16_MAX)
5724                         *fdb_mirror_limit = 1;
5725         }
5726         /* Continue validation for Xcap actions. */
5727         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5728             (queue_index == 0xFFFF ||
5729              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5730                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5731                      MLX5_FLOW_XCAP_ACTIONS)
5732                         return rte_flow_error_set(error, ENOTSUP,
5733                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5734                                                   NULL, "encap and decap "
5735                                                   "combination isn't "
5736                                                   "supported");
5737                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5738                                                         MLX5_FLOW_ACTION_ENCAP))
5739                         return rte_flow_error_set(error, ENOTSUP,
5740                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5741                                                   NULL, "encap is not supported"
5742                                                   " for ingress traffic");
5743         }
5744         return 0;
5745 }
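
/*
 * Illustration (hypothetical values, assuming a non-transfer ingress
 * flow and sampler support): a minimal sample action that satisfies the
 * validation above, sampling roughly half of the traffic to queue 0:
 *
 *     struct rte_flow_action_queue queue = { .index = 0 };
 *     struct rte_flow_action sub_actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END, },
 *     };
 *     struct rte_flow_action_sample sample = {
 *             .ratio = 2,
 *             .actions = sub_actions,
 *     };
 */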
5746
5747 /**
5748  * Find existing modify-header resource or create and register a new one.
5749  *
5750  * @param[in, out] dev
5751  *   Pointer to rte_eth_dev structure.
5752  * @param[in, out] resource
5753  *   Pointer to modify-header resource.
5754  * @param[in, out] dev_flow
5755  *   Pointer to the dev_flow.
5756  * @param[out] error
5757  *   Pointer to error structure.
5758  *
5759  * @return
5760  *   0 on success, otherwise a negative errno value and rte_errno is set.
5761  */
5762 static int
5763 flow_dv_modify_hdr_resource_register
5764                         (struct rte_eth_dev *dev,
5765                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5766                          struct mlx5_flow *dev_flow,
5767                          struct rte_flow_error *error)
5768 {
5769         struct mlx5_priv *priv = dev->data->dev_private;
5770         struct mlx5_dev_ctx_shared *sh = priv->sh;
5771         uint32_t key_len = sizeof(*resource) -
5772                            offsetof(typeof(*resource), ft_type) +
5773                            resource->actions_num * sizeof(resource->actions[0]);
5774         struct mlx5_list_entry *entry;
5775         struct mlx5_flow_cb_ctx ctx = {
5776                 .error = error,
5777                 .data = resource,
5778         };
5779         struct mlx5_hlist *modify_cmds;
5780         uint64_t key64;
5781
5782         modify_cmds = flow_dv_hlist_prepare(sh, &sh->modify_cmds,
5783                                 "hdr_modify",
5784                                 MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
5785                                 true, false, sh,
5786                                 flow_dv_modify_create_cb,
5787                                 flow_dv_modify_match_cb,
5788                                 flow_dv_modify_remove_cb,
5789                                 flow_dv_modify_clone_cb,
5790                                 flow_dv_modify_clone_free_cb);
5791         if (unlikely(!modify_cmds))
5792                 return -rte_errno;
5793         resource->root = !dev_flow->dv.group;
5794         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5795                                                                 resource->root))
5796                 return rte_flow_error_set(error, EOVERFLOW,
5797                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5798                                           "too many modify header items");
5799         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5800         entry = mlx5_hlist_register(modify_cmds, key64, &ctx);
5801         if (!entry)
5802                 return -rte_errno;
5803         resource = container_of(entry, typeof(*resource), entry);
5804         dev_flow->handle->dvh.modify_hdr = resource;
5805         return 0;
5806 }
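
/*
 * Registration note (illustrative): the 64-bit hash key is a raw
 * checksum over exactly the byte span that flow_dv_modify_match_cb()
 * compares,
 *
 *     key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
 *
 * so hash collisions are harmless and get resolved by the full memcmp()
 * in the match callback.
 */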
5807
5808 /**
5809  * Get DV flow counter by index.
5810  *
5811  * @param[in] dev
5812  *   Pointer to the Ethernet device structure.
5813  * @param[in] idx
5814  *   mlx5 flow counter index in the container.
5815  * @param[out] ppool
5816  *   mlx5 flow counter pool in the container.
5817  *
5818  * @return
5819  *   Pointer to the counter, NULL otherwise.
5820  */
5821 static struct mlx5_flow_counter *
5822 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5823                            uint32_t idx,
5824                            struct mlx5_flow_counter_pool **ppool)
5825 {
5826         struct mlx5_priv *priv = dev->data->dev_private;
5827         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5828         struct mlx5_flow_counter_pool *pool;
5829
5830         /* Decrease to the original index and clear the shared bit. */
5831         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5832         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5833         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5834         MLX5_ASSERT(pool);
5835         if (ppool)
5836                 *ppool = pool;
5837         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5838 }
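
/*
 * Worked example (illustrative, assuming MLX5_COUNTERS_PER_POOL == 512):
 * counter indices are 1-based and may carry a shared bit above
 * MLX5_CNT_SHARED_OFFSET. For an index whose masked, 0-based value is
 * 512:
 *
 *     idx  = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);  // 512
 *     pool = cmng->pools[512 / 512];                    // pool #1
 *     cnt  = MLX5_POOL_GET_CNT(pool, 512 % 512);        // slot 0
 */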
5839
5840 /**
5841  * Check the devx counter belongs to the pool.
5842  *
5843  * @param[in] pool
5844  *   Pointer to the counter pool.
5845  * @param[in] id
5846  *   The counter devx ID.
5847  *
5848  * @return
5849  *   True if counter belongs to the pool, false otherwise.
5850  */
5851 static bool
5852 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5853 {
5854         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5855                    MLX5_COUNTERS_PER_POOL;
5856
5857         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5858                 return true;
5859         return false;
5860 }
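
/*
 * Illustration (hypothetical IDs, assuming MLX5_COUNTERS_PER_POOL ==
 * 512): for pool->min_dcs->id == 1000 the pool covers the devx ID range
 *
 *     base = (1000 / 512) * 512 = 512   =>   [512, 1024)
 *
 * so ID 1000 belongs to this pool while ID 1024 does not.
 */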
5861
5862 /**
5863  * Get a pool by devx counter ID.
5864  *
5865  * @param[in] cmng
5866  *   Pointer to the counter management.
5867  * @param[in] id
5868  *   The counter devx ID.
5869  *
5870  * @return
5871  *   The counter pool pointer if it exists, NULL otherwise.
5872  */
5873 static struct mlx5_flow_counter_pool *
5874 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5875 {
5876         uint32_t i;
5877         struct mlx5_flow_counter_pool *pool = NULL;
5878
5879         rte_spinlock_lock(&cmng->pool_update_sl);
5880         /* Check last used pool. */
5881         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5882             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5883                 pool = cmng->pools[cmng->last_pool_idx];
5884                 goto out;
5885         }
5886         /* ID out of range means no suitable pool in the container. */
5887         if (id > cmng->max_id || id < cmng->min_id)
5888                 goto out;
5889         /*
5890          * Find the pool from the end of the container, since counter IDs
5891          * are mostly sequentially increasing, so the last pool should be
5892          * the needed one.
5893          */
5894         i = cmng->n_valid;
5895         while (i--) {
5896                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5897
5898                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5899                         pool = pool_tmp;
5900                         break;
5901                 }
5902         }
5903 out:
5904         rte_spinlock_unlock(&cmng->pool_update_sl);
5905         return pool;
5906 }
5907
5908 /**
5909  * Resize a counter container.
5910  *
5911  * @param[in] dev
5912  *   Pointer to the Ethernet device structure.
5913  *
5914  * @return
5915  *   0 on success, otherwise negative errno value and rte_errno is set.
5916  */
5917 static int
5918 flow_dv_container_resize(struct rte_eth_dev *dev)
5919 {
5920         struct mlx5_priv *priv = dev->data->dev_private;
5921         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5922         void *old_pools = cmng->pools;
5923         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5924         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5925         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5926
5927         if (!pools) {
5928                 rte_errno = ENOMEM;
5929                 return -ENOMEM;
5930         }
5931         if (old_pools)
5932                 memcpy(pools, old_pools, cmng->n *
5933                                        sizeof(struct mlx5_flow_counter_pool *));
5934         cmng->n = resize;
5935         cmng->pools = pools;
5936         if (old_pools)
5937                 mlx5_free(old_pools);
5938         return 0;
5939 }
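
/*
 * Sketch of the resize step (illustrative, assuming
 * MLX5_CNT_CONTAINER_RESIZE == 64): a container holding cmng->n == 64
 * pool pointers grows to 128 slots; the old pointer array is copied into
 * the new zero-initialized one and freed, so existing pool indices stay
 * valid.
 */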
5940
5941 /**
5942  * Query a devx flow counter.
5943  *
5944  * @param[in] dev
5945  *   Pointer to the Ethernet device structure.
5946  * @param[in] counter
5947  *   Index to the flow counter.
5948  * @param[out] pkts
5949  *   The statistics value of packets.
5950  * @param[out] bytes
5951  *   The statistics value of bytes.
5952  *
5953  * @return
5954  *   0 on success, otherwise a negative errno value and rte_errno is set.
5955  */
5956 static inline int
5957 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5958                      uint64_t *bytes)
5959 {
5960         struct mlx5_priv *priv = dev->data->dev_private;
5961         struct mlx5_flow_counter_pool *pool = NULL;
5962         struct mlx5_flow_counter *cnt;
5963         int offset;
5964
5965         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5966         MLX5_ASSERT(pool);
5967         if (priv->sh->cmng.counter_fallback)
5968                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5969                                         0, pkts, bytes, 0, NULL, NULL, 0);
5970         rte_spinlock_lock(&pool->sl);
5971         if (!pool->raw) {
5972                 *pkts = 0;
5973                 *bytes = 0;
5974         } else {
5975                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5976                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5977                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5978         }
5979         rte_spinlock_unlock(&pool->sl);
5980         return 0;
5981 }
5982
5983 /**
5984  * Create and initialize a new counter pool.
5985  *
5986  * @param[in] dev
5987  *   Pointer to the Ethernet device structure.
5988  * @param[out] dcs
5989  *   The devX counter handle.
5990  * @param[in] age
5991  *   Whether the pool is for a counter that was allocated for aging.
5994  *
5995  * @return
5996  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
5997  */
5998 static struct mlx5_flow_counter_pool *
5999 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
6000                     uint32_t age)
6001 {
6002         struct mlx5_priv *priv = dev->data->dev_private;
6003         struct mlx5_flow_counter_pool *pool;
6004         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6005         bool fallback = priv->sh->cmng.counter_fallback;
6006         uint32_t size = sizeof(*pool);
6007
6008         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
6009         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
6010         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
6011         if (!pool) {
6012                 rte_errno = ENOMEM;
6013                 return NULL;
6014         }
6015         pool->raw = NULL;
6016         pool->is_aged = !!age;
6017         pool->query_gen = 0;
6018         pool->min_dcs = dcs;
6019         rte_spinlock_init(&pool->sl);
6020         rte_spinlock_init(&pool->csl);
6021         TAILQ_INIT(&pool->counters[0]);
6022         TAILQ_INIT(&pool->counters[1]);
6023         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
6024         rte_spinlock_lock(&cmng->pool_update_sl);
6025         pool->index = cmng->n_valid;
6026         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
6027                 mlx5_free(pool);
6028                 rte_spinlock_unlock(&cmng->pool_update_sl);
6029                 return NULL;
6030         }
6031         cmng->pools[pool->index] = pool;
6032         cmng->n_valid++;
6033         if (unlikely(fallback)) {
6034                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
6035
6036                 if (base < cmng->min_id)
6037                         cmng->min_id = base;
6038                 if (base > cmng->max_id)
6039                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
6040                 cmng->last_pool_idx = pool->index;
6041         }
6042         rte_spinlock_unlock(&cmng->pool_update_sl);
6043         return pool;
6044 }
6045
6046 /**
6047  * Prepare a new counter and/or a new counter pool.
6048  *
6049  * @param[in] dev
6050  *   Pointer to the Ethernet device structure.
6051  * @param[out] cnt_free
6052  *   Where to put the pointer of a new counter.
6053  * @param[in] age
6054  *   Whether the pool is for counters allocated for aging.
6055  *
6056  * @return
6057  *   The counter pool pointer and @p cnt_free is set on success,
6058  *   NULL otherwise and rte_errno is set.
6059  */
6060 static struct mlx5_flow_counter_pool *
6061 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
6062                              struct mlx5_flow_counter **cnt_free,
6063                              uint32_t age)
6064 {
6065         struct mlx5_priv *priv = dev->data->dev_private;
6066         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6067         struct mlx5_flow_counter_pool *pool;
6068         struct mlx5_counters tmp_tq;
6069         struct mlx5_devx_obj *dcs = NULL;
6070         struct mlx5_flow_counter *cnt;
6071         enum mlx5_counter_type cnt_type =
6072                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6073         bool fallback = priv->sh->cmng.counter_fallback;
6074         uint32_t i;
6075
6076         if (fallback) {
6077                 /* bulk_bitmap must be 0 for single counter allocation. */
6078                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0);
6079                 if (!dcs)
6080                         return NULL;
6081                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
6082                 if (!pool) {
6083                         pool = flow_dv_pool_create(dev, dcs, age);
6084                         if (!pool) {
6085                                 mlx5_devx_cmd_destroy(dcs);
6086                                 return NULL;
6087                         }
6088                 }
6089                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
6090                 cnt = MLX5_POOL_GET_CNT(pool, i);
6091                 cnt->pool = pool;
6092                 cnt->dcs_when_free = dcs;
6093                 *cnt_free = cnt;
6094                 return pool;
6095         }
6096         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
6097         if (!dcs) {
6098                 rte_errno = ENODATA;
6099                 return NULL;
6100         }
6101         pool = flow_dv_pool_create(dev, dcs, age);
6102         if (!pool) {
6103                 mlx5_devx_cmd_destroy(dcs);
6104                 return NULL;
6105         }
6106         TAILQ_INIT(&tmp_tq);
6107         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
6108                 cnt = MLX5_POOL_GET_CNT(pool, i);
6109                 cnt->pool = pool;
6110                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
6111         }
6112         rte_spinlock_lock(&cmng->csl[cnt_type]);
6113         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
6114         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6115         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
6116         (*cnt_free)->pool = pool;
6117         return pool;
6118 }
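
/*
 * Editorial note: the function above links counters on a private tmp_tq
 * without holding any lock and splices them into the shared free list
 * with a single TAILQ_CONCAT, keeping the critical section O(1) instead
 * of O(MLX5_COUNTERS_PER_POOL). A minimal sketch of the same pattern
 * (hypothetical guard and names, never compiled):
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static void
example_bulk_publish(struct mlx5_flow_counter_mng *cmng,
                     struct mlx5_flow_counter_pool *pool,
                     enum mlx5_counter_type cnt_type, uint32_t n)
{
        struct mlx5_counters tmp_tq;
        uint32_t i;

        TAILQ_INIT(&tmp_tq);
        /* Link the counters locally, no lock needed yet. */
        for (i = 0; i < n; ++i)
                TAILQ_INSERT_HEAD(&tmp_tq, MLX5_POOL_GET_CNT(pool, i), next);
        /* One O(1) splice under the lock instead of n insertions. */
        rte_spinlock_lock(&cmng->csl[cnt_type]);
        TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
        rte_spinlock_unlock(&cmng->csl[cnt_type]);
}
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */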
6119
6120 /**
6121  * Allocate a flow counter.
6122  *
6123  * @param[in] dev
6124  *   Pointer to the Ethernet device structure.
6125  * @param[in] age
6126  *   Whether the counter is allocated for aging.
6127  *
6128  * @return
6129  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6130  */
6131 static uint32_t
6132 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
6133 {
6134         struct mlx5_priv *priv = dev->data->dev_private;
6135         struct mlx5_flow_counter_pool *pool = NULL;
6136         struct mlx5_flow_counter *cnt_free = NULL;
6137         bool fallback = priv->sh->cmng.counter_fallback;
6138         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6139         enum mlx5_counter_type cnt_type =
6140                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6141         uint32_t cnt_idx;
6142
6143         if (!priv->sh->devx) {
6144                 rte_errno = ENOTSUP;
6145                 return 0;
6146         }
6147         /* Get free counters from container. */
6148         rte_spinlock_lock(&cmng->csl[cnt_type]);
6149         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
6150         if (cnt_free)
6151                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
6152         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6153         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
6154                 goto err;
6155         pool = cnt_free->pool;
6156         if (fallback)
6157                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
6158         /* Create a DV counter action only on first-time usage. */
6159         if (!cnt_free->action) {
6160                 uint16_t offset;
6161                 struct mlx5_devx_obj *dcs;
6162                 int ret;
6163
6164                 if (!fallback) {
6165                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
6166                         dcs = pool->min_dcs;
6167                 } else {
6168                         offset = 0;
6169                         dcs = cnt_free->dcs_when_free;
6170                 }
6171                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
6172                                                             &cnt_free->action);
6173                 if (ret) {
6174                         rte_errno = errno;
6175                         goto err;
6176                 }
6177         }
6178         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
6179                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
6180         /* Update the counter reset values. */
6181         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
6182                                  &cnt_free->bytes))
6183                 goto err;
6184         if (!fallback && !priv->sh->cmng.query_thread_on)
6185                 /* Start the asynchronous batch query by the host thread. */
6186                 mlx5_set_query_alarm(priv->sh);
6187         /*
6188          * When the count action isn't shared by ID, the shared_info field
6189          * is used for the indirect action API's reference count.
6190          * When the counter action is shared neither by ID nor by the
6191          * indirect action API, the reference count must be 1.
6192          */
6193         cnt_free->shared_info.refcnt = 1;
6194         return cnt_idx;
6195 err:
6196         if (cnt_free) {
6197                 cnt_free->pool = pool;
6198                 if (fallback)
6199                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
6200                 rte_spinlock_lock(&cmng->csl[cnt_type]);
6201                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
6202                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6203         }
6204         return 0;
6205 }
6206
6207 /**
6208  * Get age param from counter index.
6209  *
6210  * @param[in] dev
6211  *   Pointer to the Ethernet device structure.
6212  * @param[in] counter
6213  *   Index to the counter handle.
6214  *
6215  * @return
6216  *   The aging parameter specified for the counter index.
6217  */
6218 static struct mlx5_age_param*
6219 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6220                                 uint32_t counter)
6221 {
6222         struct mlx5_flow_counter *cnt;
6223         struct mlx5_flow_counter_pool *pool = NULL;
6224
6225         flow_dv_counter_get_by_idx(dev, counter, &pool);
6226         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6227         cnt = MLX5_POOL_GET_CNT(pool, counter);
6228         return MLX5_CNT_TO_AGE(cnt);
6229 }
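
/*
 * Editorial note: counter indexes are 1-based so that 0 can be reserved
 * for failure. Assuming MLX5_MAKE_CNT_IDX() encodes
 * pool_index * MLX5_COUNTERS_PER_POOL + offset + 1, the modulo in the
 * function above recovers the in-pool offset (hypothetical guard,
 * never compiled):
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static uint32_t
example_cnt_idx_roundtrip(uint32_t pool_index, uint32_t offset)
{
        uint32_t idx = MLX5_MAKE_CNT_IDX(pool_index, offset);

        /* Same arithmetic as flow_dv_counter_idx_get_age() above. */
        return (idx - 1) % MLX5_COUNTERS_PER_POOL; /* equals offset */
}
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */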
6230
6231 /**
6232  * Remove a flow counter from aged counter list.
6233  *
6234  * @param[in] dev
6235  *   Pointer to the Ethernet device structure.
6236  * @param[in] counter
6237  *   Index to the counter handle.
6238  * @param[in] cnt
6239  *   Pointer to the counter handle.
6240  */
6241 static void
6242 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6243                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6244 {
6245         struct mlx5_age_info *age_info;
6246         struct mlx5_age_param *age_param;
6247         struct mlx5_priv *priv = dev->data->dev_private;
6248         uint16_t expected = AGE_CANDIDATE;
6249
6250         age_info = GET_PORT_AGE_INFO(priv);
6251         age_param = flow_dv_counter_idx_get_age(dev, counter);
6252         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6253                                          AGE_FREE, false, __ATOMIC_RELAXED,
6254                                          __ATOMIC_RELAXED)) {
6255                 /*
6256                  * We need the lock even on age timeout,
6257                  * since the counter may still be in process.
6258                  */
6259                 rte_spinlock_lock(&age_info->aged_sl);
6260                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6261                 rte_spinlock_unlock(&age_info->aged_sl);
6262                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6263         }
6264 }
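
/*
 * Editorial sketch of the state transition used above: the CAS succeeds
 * only while the counter is still an aging candidate; once the aging
 * thread has moved it to the aged list, the fallback path with the lock
 * is taken instead (hypothetical guard, never compiled):
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static bool
example_try_age_release(uint16_t *state)
{
        uint16_t expected = AGE_CANDIDATE;

        /* False when the state has already left AGE_CANDIDATE. */
        return __atomic_compare_exchange_n(state, &expected, AGE_FREE,
                                           false, __ATOMIC_RELAXED,
                                           __ATOMIC_RELAXED);
}
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */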
6265
6266 /**
6267  * Release a flow counter.
6268  *
6269  * @param[in] dev
6270  *   Pointer to the Ethernet device structure.
6271  * @param[in] counter
6272  *   Index to the counter handle.
6273  */
6274 static void
6275 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6276 {
6277         struct mlx5_priv *priv = dev->data->dev_private;
6278         struct mlx5_flow_counter_pool *pool = NULL;
6279         struct mlx5_flow_counter *cnt;
6280         enum mlx5_counter_type cnt_type;
6281
6282         if (!counter)
6283                 return;
6284         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6285         MLX5_ASSERT(pool);
6286         if (pool->is_aged) {
6287                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6288         } else {
6289                 /*
6290                  * If the counter action is shared by the indirect action API,
6291                  * the atomic function decrements its reference count.
6292                  * If the action is still referenced after the decrement, the
6293                  * function returns here and does not release it.
6294                  * When the counter action is not shared by the indirect
6295                  * action API, the reference count is 1 before the decrement,
6296                  * so the condition fails and the function does not return here.
6297                  */
6298                 if (__atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6299                                        __ATOMIC_RELAXED))
6300                         return;
6301         }
6302         cnt->pool = pool;
6303         /*
6304          * Put the counter back on a list to be updated in non-fallback mode.
6305          * Two lists are used alternately: while one is being queried, freed
6306          * counters are added to the other one, selected by the pool query_gen
6307          * value. After the query finishes, that list is appended to the
6308          * global container counter list. The lists are swapped when a query
6309          * starts, so the query callback and this release function always
6310          * operate on different lists and need no lock between them.
6311          */
6312         if (!priv->sh->cmng.counter_fallback) {
6313                 rte_spinlock_lock(&pool->csl);
6314                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6315                 rte_spinlock_unlock(&pool->csl);
6316         } else {
6317                 cnt->dcs_when_free = cnt->dcs_when_active;
6318                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6319                                            MLX5_COUNTER_TYPE_ORIGIN;
6320                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6321                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6322                                   cnt, next);
6323                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6324         }
6325 }
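
/*
 * Editorial sketch pairing the allocation and release paths above. A
 * counter index is 1-based (0 means failure) and must be released
 * exactly once; the indirect action refcnt handles shared usage. The
 * error handling is illustrative (hypothetical guard, never compiled):
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static int
example_counter_lifecycle(struct rte_eth_dev *dev)
{
        uint64_t pkts;
        uint64_t bytes;
        uint32_t idx = flow_dv_counter_alloc(dev, 0); /* no aging */

        if (!idx)
                return -rte_errno;
        if (_flow_dv_query_count(dev, idx, &pkts, &bytes)) {
                flow_dv_counter_free(dev, idx);
                return -rte_errno;
        }
        flow_dv_counter_free(dev, idx);
        return 0;
}
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */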
6326
6327 /**
6328  * Resize the ASO meter pool container.
6329  *
6330  * @param[in] dev
6331  *   Pointer to the Ethernet device structure.
6332  *
6333  * @return
6334  *   0 on success, otherwise negative errno value and rte_errno is set.
6335  */
6336 static int
6337 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6338 {
6339         struct mlx5_priv *priv = dev->data->dev_private;
6340         struct mlx5_aso_mtr_pools_mng *pools_mng =
6341                                 &priv->sh->mtrmng->pools_mng;
6342         void *old_pools = pools_mng->pools;
6343         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6344         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6345         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6346
6347         if (!pools) {
6348                 rte_errno = ENOMEM;
6349                 return -ENOMEM;
6350         }
6351         if (!pools_mng->n)
6352                 if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6353                         mlx5_free(pools);
6354                         return -ENOMEM;
6355                 }
6356         if (old_pools)
6357                 memcpy(pools, old_pools, pools_mng->n *
6358                                        sizeof(struct mlx5_aso_mtr_pool *));
6359         pools_mng->n = resize;
6360         pools_mng->pools = pools;
6361         if (old_pools)
6362                 mlx5_free(old_pools);
6363         return 0;
6364 }
6365
6366 /**
6367  * Prepare a new meter and/or a new meter pool.
6368  *
6369  * @param[in] dev
6370  *   Pointer to the Ethernet device structure.
6371  * @param[out] mtr_free
6372  *   Where to put the pointer of a new meter.
6373  *
6374  * @return
6375  *   The meter pool pointer and @p mtr_free is set on success,
6376  *   NULL otherwise and rte_errno is set.
6377  */
6378 static struct mlx5_aso_mtr_pool *
6379 flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
6380 {
6381         struct mlx5_priv *priv = dev->data->dev_private;
6382         struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
6383         struct mlx5_aso_mtr_pool *pool = NULL;
6384         struct mlx5_devx_obj *dcs = NULL;
6385         uint32_t i;
6386         uint32_t log_obj_size;
6387
6388         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6389         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
6390                                                       priv->sh->cdev->pdn,
6391                                                       log_obj_size);
6392         if (!dcs) {
6393                 rte_errno = ENODATA;
6394                 return NULL;
6395         }
6396         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6397         if (!pool) {
6398                 rte_errno = ENOMEM;
6399                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6400                 return NULL;
6401         }
6402         pool->devx_obj = dcs;
6403         rte_rwlock_write_lock(&pools_mng->resize_mtrwl);
6404         pool->index = pools_mng->n_valid;
6405         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6406                 mlx5_free(pool);
6407                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6408                 rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6409                 return NULL;
6410         }
6411         pools_mng->pools[pool->index] = pool;
6412         pools_mng->n_valid++;
6413         rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6414         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6415                 pool->mtrs[i].offset = i;
6416                 LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
6417         }
6418         pool->mtrs[0].offset = 0;
6419         *mtr_free = &pool->mtrs[0];
6420         return pool;
6421 }
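
/*
 * Editorial worked example for the sizing above: the right shift by one
 * suggests two flow meters are packed per ASO object (inferred from this
 * code, the PRM is the authority). With a hypothetical
 * MLX5_ASO_MTRS_PER_POOL of 4096, the pool needs 4096 >> 1 == 2048 ASO
 * objects, so log_obj_size == rte_log2_u32(2048) == 11.
 */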
6422
6423 /**
6424  * Release an ASO flow meter back into the pool.
6425  *
6426  * @param[in] dev
6427  *   Pointer to the Ethernet device structure.
6428  * @param[in] mtr_idx
6429  *   Index to the ASO flow meter.
6430  */
6431 static void
6432 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6433 {
6434         struct mlx5_priv *priv = dev->data->dev_private;
6435         struct mlx5_aso_mtr_pools_mng *pools_mng =
6436                                 &priv->sh->mtrmng->pools_mng;
6437         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6438
6439         MLX5_ASSERT(aso_mtr);
6440         rte_spinlock_lock(&pools_mng->mtrsl);
6441         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6442         aso_mtr->state = ASO_METER_FREE;
6443         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6444         rte_spinlock_unlock(&pools_mng->mtrsl);
6445 }
6446
6447 /**
6448  * Allocate an ASO flow meter.
6449  *
6450  * @param[in] dev
6451  *   Pointer to the Ethernet device structure.
6452  *
6453  * @return
6454  *   Index to the ASO flow meter on success, 0 otherwise and rte_errno is set.
6455  */
6456 static uint32_t
6457 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6458 {
6459         struct mlx5_priv *priv = dev->data->dev_private;
6460         struct mlx5_aso_mtr *mtr_free = NULL;
6461         struct mlx5_aso_mtr_pools_mng *pools_mng =
6462                                 &priv->sh->mtrmng->pools_mng;
6463         struct mlx5_aso_mtr_pool *pool;
6464         uint32_t mtr_idx = 0;
6465
6466         if (!priv->sh->devx) {
6467                 rte_errno = ENOTSUP;
6468                 return 0;
6469         }
6470         /* Get a free meter from the pool management free list. */
6472         rte_spinlock_lock(&pools_mng->mtrsl);
6473         mtr_free = LIST_FIRST(&pools_mng->meters);
6474         if (mtr_free)
6475                 LIST_REMOVE(mtr_free, next);
6476         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6477                 rte_spinlock_unlock(&pools_mng->mtrsl);
6478                 return 0;
6479         }
6480         mtr_free->state = ASO_METER_WAIT;
6481         rte_spinlock_unlock(&pools_mng->mtrsl);
6482         pool = container_of(mtr_free,
6483                         struct mlx5_aso_mtr_pool,
6484                         mtrs[mtr_free->offset]);
6485         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6486         if (!mtr_free->fm.meter_action) {
6487 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6488                 struct rte_flow_error error;
6489                 uint8_t reg_id;
6490
6491                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6492                 mtr_free->fm.meter_action =
6493                         mlx5_glue->dv_create_flow_action_aso
6494                                                 (priv->sh->rx_domain,
6495                                                  pool->devx_obj->obj,
6496                                                  mtr_free->offset,
6497                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6498                                                  reg_id - REG_C_0);
6499 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6500                 if (!mtr_free->fm.meter_action) {
6501                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6502                         return 0;
6503                 }
6504         }
6505         return mtr_idx;
6506 }
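
/*
 * Editorial sketch pairing flow_dv_mtr_alloc() with its release helper
 * above; illustrative only (hypothetical guard, never compiled):
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static int
example_mtr_lifecycle(struct rte_eth_dev *dev)
{
        uint32_t mtr_idx = flow_dv_mtr_alloc(dev);

        if (!mtr_idx)
                return -rte_errno; /* e.g. ENOTSUP when DevX is missing */
        /* ... configure the meter and attach it to flows here ... */
        flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
        return 0;
}
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */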
6507
6508 /**
6509  * Verify that the @p attributes will be correctly understood by the NIC.
6510  *
6511  * @param[in] dev
6512  *   Pointer to the Ethernet device structure.
6513  * @param[in] tunnel
6514  *   Pointer to the tunnel offload rule descriptor, or NULL.
6515  * @param[in] attributes
6516  *   Pointer to flow attributes.
6517  * @param[in] grp_info
6518  *   Pointer to the flow group translation info.
6519  * @param[out] error
6520  *   Pointer to error structure.
6521  *
6522  * @return
6523  *   - 0 on success and non-root table.
6523  *   - 1 on success and root table.
6524  *   - a negative errno value otherwise and rte_errno is set.
6525  */
6526 static int
6527 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6528                             const struct mlx5_flow_tunnel *tunnel,
6529                             const struct rte_flow_attr *attributes,
6530                             const struct flow_grp_info *grp_info,
6531                             struct rte_flow_error *error)
6532 {
6533         struct mlx5_priv *priv = dev->data->dev_private;
6534         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6535         int ret = 0;
6536
6537 #ifndef HAVE_MLX5DV_DR
6538         RTE_SET_USED(tunnel);
6539         RTE_SET_USED(grp_info);
6540         if (attributes->group)
6541                 return rte_flow_error_set(error, ENOTSUP,
6542                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6543                                           NULL,
6544                                           "groups are not supported");
6545 #else
6546         uint32_t table = 0;
6547
6548         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6549                                        grp_info, error);
6550         if (ret)
6551                 return ret;
6552         if (!table)
6553                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6554 #endif
6555         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6556             attributes->priority > lowest_priority)
6557                 return rte_flow_error_set(error, ENOTSUP,
6558                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6559                                           NULL,
6560                                           "priority out of range");
6561         if (attributes->transfer) {
6562                 if (!priv->config.dv_esw_en)
6563                         return rte_flow_error_set
6564                                 (error, ENOTSUP,
6565                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6566                                  "E-Switch dr is not supported");
6567                 if (!(priv->representor || priv->master))
6568                         return rte_flow_error_set
6569                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6570                                  NULL, "E-Switch configuration can only be"
6571                                  " done by a master or a representor device");
6572                 if (attributes->egress)
6573                         return rte_flow_error_set
6574                                 (error, ENOTSUP,
6575                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6576                                  "egress is not supported");
6577         }
6578         if (!(attributes->egress ^ attributes->ingress))
6579                 return rte_flow_error_set(error, ENOTSUP,
6580                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6581                                           "must specify exactly one of "
6582                                           "ingress or egress");
6583         return ret;
6584 }
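
/*
 * Editorial sketch of how the tri-state return above is consumed; the
 * same pattern appears in flow_dv_validate() below, where the positive
 * value becomes is_root (hypothetical guard, never compiled):
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static int
example_attr_check(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct flow_grp_info *grp_info,
                   struct rte_flow_error *error)
{
        /* NULL tunnel: no tunnel offload in this rule. */
        int ret = flow_dv_validate_attributes(dev, NULL, attr, grp_info,
                                              error);

        if (ret < 0)
                return ret; /* invalid attributes, rte_errno is set */
        return ret; /* 0 - non-root table, 1 - root table */
}
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */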
6585
6586 static int
6587 validate_integrity_bits(const struct rte_flow_item_integrity *mask,
6588                         int64_t pattern_flags, uint64_t l3_flags,
6589                         uint64_t l4_flags, uint64_t ip4_flag,
6590                         struct rte_flow_error *error)
6591 {
6592         if (mask->l3_ok && !(pattern_flags & l3_flags))
6593                 return rte_flow_error_set(error, EINVAL,
6594                                           RTE_FLOW_ERROR_TYPE_ITEM,
6595                                           NULL, "missing L3 protocol");
6596
6597         if (mask->ipv4_csum_ok && !(pattern_flags & ip4_flag))
6598                 return rte_flow_error_set(error, EINVAL,
6599                                           RTE_FLOW_ERROR_TYPE_ITEM,
6600                                           NULL, "missing IPv4 protocol");
6601
6602         if ((mask->l4_ok || mask->l4_csum_ok) && !(pattern_flags & l4_flags))
6603                 return rte_flow_error_set(error, EINVAL,
6604                                           RTE_FLOW_ERROR_TYPE_ITEM,
6605                                           NULL, "missing L4 protocol");
6606
6607         return 0;
6608 }
6609
6610 static int
6611 flow_dv_validate_item_integrity_post(const struct
6612                                      rte_flow_item *integrity_items[2],
6613                                      int64_t pattern_flags,
6614                                      struct rte_flow_error *error)
6615 {
6616         const struct rte_flow_item_integrity *mask;
6617         int ret;
6618
6619         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
6620                 mask = (typeof(mask))integrity_items[0]->mask;
6621                 ret = validate_integrity_bits(mask, pattern_flags,
6622                                               MLX5_FLOW_LAYER_OUTER_L3,
6623                                               MLX5_FLOW_LAYER_OUTER_L4,
6624                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4,
6625                                               error);
6626                 if (ret)
6627                         return ret;
6628         }
6629         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
6630                 mask = (typeof(mask))integrity_items[1]->mask;
6631                 ret = validate_integrity_bits(mask, pattern_flags,
6632                                               MLX5_FLOW_LAYER_INNER_L3,
6633                                               MLX5_FLOW_LAYER_INNER_L4,
6634                                               MLX5_FLOW_LAYER_INNER_L3_IPV4,
6635                                               error);
6636                 if (ret)
6637                         return ret;
6638         }
6639         return 0;
6640 }
6641
6642 static int
6643 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6644                                 const struct rte_flow_item *integrity_item,
6645                                 uint64_t pattern_flags, uint64_t *last_item,
6646                                 const struct rte_flow_item *integrity_items[2],
6647                                 struct rte_flow_error *error)
6648 {
6649         struct mlx5_priv *priv = dev->data->dev_private;
6650         const struct rte_flow_item_integrity *mask = (typeof(mask))
6651                                                      integrity_item->mask;
6652         const struct rte_flow_item_integrity *spec = (typeof(spec))
6653                                                      integrity_item->spec;
6654
6655         if (!priv->config.hca_attr.pkt_integrity_match)
6656                 return rte_flow_error_set(error, ENOTSUP,
6657                                           RTE_FLOW_ERROR_TYPE_ITEM,
6658                                           integrity_item,
6659                                           "packet integrity item not supported");
6660         if (!spec)
6661                 return rte_flow_error_set(error, ENOTSUP,
6662                                           RTE_FLOW_ERROR_TYPE_ITEM,
6663                                           integrity_item,
6664                                           "no spec for integrity item");
6665         if (!mask)
6666                 mask = &rte_flow_item_integrity_mask;
6667         if (!mlx5_validate_integrity_item(mask))
6668                 return rte_flow_error_set(error, ENOTSUP,
6669                                           RTE_FLOW_ERROR_TYPE_ITEM,
6670                                           integrity_item,
6671                                           "unsupported integrity filter");
6672         if (spec->level > 1) {
6673                 if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY)
6674                         return rte_flow_error_set
6675                                 (error, ENOTSUP,
6676                                  RTE_FLOW_ERROR_TYPE_ITEM,
6677                                  NULL, "multiple inner integrity items not supported");
6678                 integrity_items[1] = integrity_item;
6679                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
6680         } else {
6681                 if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY)
6682                         return rte_flow_error_set
6683                                 (error, ENOTSUP,
6684                                  RTE_FLOW_ERROR_TYPE_ITEM,
6685                                  NULL, "multiple outer integrity items not supported");
6686                 integrity_items[0] = integrity_item;
6687                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
6688         }
6689         return 0;
6690 }
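
/*
 * Editorial sketch: a pattern accepted by the checks above. An outer
 * integrity item (level <= 1) asserting l3_ok and ipv4_csum_ok must be
 * accompanied by an outer IPv4 item; that pairing is enforced later by
 * flow_dv_validate_item_integrity_post(). Hypothetical guard, never
 * compiled:
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static const struct rte_flow_item_integrity example_integrity_mask = {
        .level = 0, /* outermost encapsulation level */
        .l3_ok = 1,
        .ipv4_csum_ok = 1,
};
static const struct rte_flow_item example_integrity_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* supplies the L3 layer */
        {
                .type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
                .spec = &example_integrity_mask,
                .mask = &example_integrity_mask,
        },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */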
6691
6692 static int
6693 flow_dv_validate_item_flex(struct rte_eth_dev *dev,
6694                            const struct rte_flow_item *item,
6695                            uint64_t item_flags,
6696                            uint64_t *last_item,
6697                            bool is_inner,
6698                            struct rte_flow_error *error)
6699 {
6700         const struct rte_flow_item_flex *flow_spec = item->spec;
6701         const struct rte_flow_item_flex *flow_mask = item->mask;
6702         struct mlx5_flex_item *flex;
6703
6704         if (!flow_spec)
6705                 return rte_flow_error_set(error, EINVAL,
6706                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6707                                           "flex flow item spec cannot be NULL");
6708         if (!flow_mask)
6709                 return rte_flow_error_set(error, EINVAL,
6710                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6711                                           "flex flow item mask cannot be NULL");
6712         if (item->last)
6713                 return rte_flow_error_set(error, ENOTSUP,
6714                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6715                                           "flex flow item last not supported");
6716         if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
6717                 return rte_flow_error_set(error, EINVAL,
6718                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6719                                           "invalid flex flow item handle");
6720         flex = (struct mlx5_flex_item *)flow_spec->handle;
6721         switch (flex->tunnel_mode) {
6722         case FLEX_TUNNEL_MODE_SINGLE:
6723                 if (item_flags &
6724                     (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
6725                         return rte_flow_error_set(error, EINVAL,
6726                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6727                                                   NULL, "multiple flex items not supported");
6728                 break;
6729         case FLEX_TUNNEL_MODE_OUTER:
6730                 if (is_inner)
6731                         return rte_flow_error_set(error, EINVAL,
6732                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6733                                                   NULL, "inner flex item was not configured");
6734                 if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
6735                         return rte_flow_error_set(error, ENOTSUP,
6736                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6737                                                   NULL, "multiple flex items not supported");
6738                 break;
6739         case FLEX_TUNNEL_MODE_INNER:
6740                 if (!is_inner)
6741                         return rte_flow_error_set(error, EINVAL,
6742                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6743                                                   NULL, "outer flex item was not configured");
6744                 if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
6745                         return rte_flow_error_set(error, EINVAL,
6746                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6747                                                   NULL, "multiple flex items not supported");
6748                 break;
6749         case FLEX_TUNNEL_MODE_MULTI:
6750                 if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
6751                     (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX))) {
6752                         return rte_flow_error_set(error, EINVAL,
6753                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6754                                                   NULL, "multiple flex items not supported");
6755                 }
6756                 break;
6757         case FLEX_TUNNEL_MODE_TUNNEL:
6758                 if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
6759                         return rte_flow_error_set(error, EINVAL,
6760                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6761                                                   NULL, "multiple flex tunnel items not supported");
6762                 break;
6763         default:
6764                 return rte_flow_error_set(error, EINVAL,
6765                                           RTE_FLOW_ERROR_TYPE_ITEM,
6766                                           NULL, "invalid flex item configuration");
6767         }
6768         *last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
6769                      MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
6770                      MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
6771         return 0;
6772 }
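
/*
 * Editorial sketch: building a flex item that passes the checks above.
 * The handle must come from rte_flow_flex_item_create(); the sample
 * bytes and all names are illustrative (hypothetical guard, never
 * compiled):
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static void
example_fill_flex_item(struct rte_flow_item_flex_handle *handle,
                       struct rte_flow_item_flex *spec,
                       struct rte_flow_item *item)
{
        static const uint8_t sample[4] = { 0xde, 0xad, 0xbe, 0xef };

        spec->handle = handle; /* invalid handles are rejected above */
        spec->length = sizeof(sample);
        spec->pattern = sample;
        item->type = RTE_FLOW_ITEM_TYPE_FLEX;
        item->spec = spec;
        item->mask = spec; /* both spec and mask are mandatory above */
        item->last = NULL; /* ranges are rejected above */
}
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */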
6773
6774 /**
6775  * Internal validation function. For validating both actions and items.
6776  *
6777  * @param[in] dev
6778  *   Pointer to the rte_eth_dev structure.
6779  * @param[in] attr
6780  *   Pointer to the flow attributes.
6781  * @param[in] items
6782  *   Pointer to the list of items.
6783  * @param[in] actions
6784  *   Pointer to the list of actions.
6785  * @param[in] external
6786  *   Whether this flow rule is created by a request external to the PMD.
6787  * @param[in] hairpin
6788  *   Number of hairpin TX actions, 0 means classic flow.
6789  * @param[out] error
6790  *   Pointer to the error structure.
6791  *
6792  * @return
6793  *   0 on success, a negative errno value otherwise and rte_errno is set.
6794  */
6795 static int
6796 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6797                  const struct rte_flow_item items[],
6798                  const struct rte_flow_action actions[],
6799                  bool external, int hairpin, struct rte_flow_error *error)
6800 {
6801         int ret;
6802         uint64_t action_flags = 0;
6803         uint64_t item_flags = 0;
6804         uint64_t last_item = 0;
6805         uint8_t next_protocol = 0xff;
6806         uint16_t ether_type = 0;
6807         int actions_n = 0;
6808         uint8_t item_ipv6_proto = 0;
6809         int fdb_mirror_limit = 0;
6810         int modify_after_mirror = 0;
6811         const struct rte_flow_item *geneve_item = NULL;
6812         const struct rte_flow_item *gre_item = NULL;
6813         const struct rte_flow_item *gtp_item = NULL;
6814         const struct rte_flow_action_raw_decap *decap;
6815         const struct rte_flow_action_raw_encap *encap;
6816         const struct rte_flow_action_rss *rss = NULL;
6817         const struct rte_flow_action_rss *sample_rss = NULL;
6818         const struct rte_flow_action_count *sample_count = NULL;
6819         const struct rte_flow_item_tcp nic_tcp_mask = {
6820                 .hdr = {
6821                         .tcp_flags = 0xFF,
6822                         .src_port = RTE_BE16(UINT16_MAX),
6823                         .dst_port = RTE_BE16(UINT16_MAX),
6824                 }
6825         };
6826         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6827                 .hdr = {
6828                         .src_addr =
6829                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6830                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6831                         .dst_addr =
6832                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6833                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6834                         .vtc_flow = RTE_BE32(0xffffffff),
6835                         .proto = 0xff,
6836                         .hop_limits = 0xff,
6837                 },
6838                 .has_frag_ext = 1,
6839         };
6840         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6841                 .hdr = {
6842                         .common = {
6843                                 .u32 =
6844                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6845                                         .type = 0xFF,
6846                                         }).u32),
6847                         },
6848                         .dummy[0] = 0xffffffff,
6849                 },
6850         };
6851         struct mlx5_priv *priv = dev->data->dev_private;
6852         struct mlx5_dev_config *dev_conf = &priv->config;
6853         uint16_t queue_index = 0xFFFF;
6854         const struct rte_flow_item_vlan *vlan_m = NULL;
6855         uint32_t rw_act_num = 0;
6856         uint64_t is_root;
6857         const struct mlx5_flow_tunnel *tunnel;
6858         enum mlx5_tof_rule_type tof_rule_type;
6859         struct flow_grp_info grp_info = {
6860                 .external = !!external,
6861                 .transfer = !!attr->transfer,
6862                 .fdb_def_rule = !!priv->fdb_def_rule,
6863                 .std_tbl_fix = true,
6864         };
6865         const struct rte_eth_hairpin_conf *conf;
6866         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
6867         const struct rte_flow_item *port_id_item = NULL;
6868         bool def_policy = false;
6869         uint16_t udp_dport = 0;
6870
6871         if (items == NULL)
6872                 return -1;
6873         tunnel = is_tunnel_offload_active(dev) ?
6874                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6875         if (tunnel) {
6876                 if (!priv->config.dv_flow_en)
6877                         return rte_flow_error_set
6878                                 (error, ENOTSUP,
6879                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6880                                  NULL, "tunnel offload requires DV flow interface");
6881                 if (priv->representor)
6882                         return rte_flow_error_set
6883                                 (error, ENOTSUP,
6884                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6885                                  NULL, "decap not supported for VF representor");
6886                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6887                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6888                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6889                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6890                                         MLX5_FLOW_ACTION_DECAP;
6891                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6892                                         (dev, attr, tunnel, tof_rule_type);
6893         }
6894         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6895         if (ret < 0)
6896                 return ret;
6897         is_root = (uint64_t)ret;
6898         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6899                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6900                 int type = items->type;
6901
6902                 if (!mlx5_flow_os_item_supported(type))
6903                         return rte_flow_error_set(error, ENOTSUP,
6904                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6905                                                   NULL, "item not supported");
6906                 switch (type) {
6907                 case RTE_FLOW_ITEM_TYPE_VOID:
6908                         break;
6909                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6910                         ret = flow_dv_validate_item_port_id
6911                                         (dev, items, attr, item_flags, error);
6912                         if (ret < 0)
6913                                 return ret;
6914                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6915                         port_id_item = items;
6916                         break;
6917                 case RTE_FLOW_ITEM_TYPE_ETH:
6918                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6919                                                           true, error);
6920                         if (ret < 0)
6921                                 return ret;
6922                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6923                                              MLX5_FLOW_LAYER_OUTER_L2;
6924                         if (items->mask != NULL && items->spec != NULL) {
6925                                 ether_type =
6926                                         ((const struct rte_flow_item_eth *)
6927                                          items->spec)->type;
6928                                 ether_type &=
6929                                         ((const struct rte_flow_item_eth *)
6930                                          items->mask)->type;
6931                                 ether_type = rte_be_to_cpu_16(ether_type);
6932                         } else {
6933                                 ether_type = 0;
6934                         }
6935                         break;
6936                 case RTE_FLOW_ITEM_TYPE_VLAN:
6937                         ret = flow_dv_validate_item_vlan(items, item_flags,
6938                                                          dev, error);
6939                         if (ret < 0)
6940                                 return ret;
6941                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6942                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6943                         if (items->mask != NULL && items->spec != NULL) {
6944                                 ether_type =
6945                                         ((const struct rte_flow_item_vlan *)
6946                                          items->spec)->inner_type;
6947                                 ether_type &=
6948                                         ((const struct rte_flow_item_vlan *)
6949                                          items->mask)->inner_type;
6950                                 ether_type = rte_be_to_cpu_16(ether_type);
6951                         } else {
6952                                 ether_type = 0;
6953                         }
6954                         /* Store outer VLAN mask for of_push_vlan action. */
6955                         if (!tunnel)
6956                                 vlan_m = items->mask;
6957                         break;
6958                 case RTE_FLOW_ITEM_TYPE_IPV4:
6959                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6960                                                   &item_flags, &tunnel);
6961                         ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
6962                                                          last_item, ether_type,
6963                                                          error);
6964                         if (ret < 0)
6965                                 return ret;
6966                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6967                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6968                         if (items->mask != NULL &&
6969                             ((const struct rte_flow_item_ipv4 *)
6970                              items->mask)->hdr.next_proto_id) {
6971                                 next_protocol =
6972                                         ((const struct rte_flow_item_ipv4 *)
6973                                          (items->spec))->hdr.next_proto_id;
6974                                 next_protocol &=
6975                                         ((const struct rte_flow_item_ipv4 *)
6976                                          (items->mask))->hdr.next_proto_id;
6977                         } else {
6978                                 /* Reset for inner layer. */
6979                                 next_protocol = 0xff;
6980                         }
6981                         break;
6982                 case RTE_FLOW_ITEM_TYPE_IPV6:
6983                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6984                                                   &item_flags, &tunnel);
6985                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6986                                                            last_item,
6987                                                            ether_type,
6988                                                            &nic_ipv6_mask,
6989                                                            error);
6990                         if (ret < 0)
6991                                 return ret;
6992                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6993                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6994                         if (items->mask != NULL &&
6995                             ((const struct rte_flow_item_ipv6 *)
6996                              items->mask)->hdr.proto) {
6997                                 item_ipv6_proto =
6998                                         ((const struct rte_flow_item_ipv6 *)
6999                                          items->spec)->hdr.proto;
7000                                 next_protocol =
7001                                         ((const struct rte_flow_item_ipv6 *)
7002                                          items->spec)->hdr.proto;
7003                                 next_protocol &=
7004                                         ((const struct rte_flow_item_ipv6 *)
7005                                          items->mask)->hdr.proto;
7006                         } else {
7007                                 /* Reset for inner layer. */
7008                                 next_protocol = 0xff;
7009                         }
7010                         break;
7011                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
7012                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
7013                                                                   item_flags,
7014                                                                   error);
7015                         if (ret < 0)
7016                                 return ret;
7017                         last_item = tunnel ?
7018                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
7019                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
7020                         if (items->mask != NULL &&
7021                             ((const struct rte_flow_item_ipv6_frag_ext *)
7022                              items->mask)->hdr.next_header) {
7023                                 next_protocol =
7024                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7025                                  items->spec)->hdr.next_header;
7026                                 next_protocol &=
7027                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7028                                  items->mask)->hdr.next_header;
7029                         } else {
7030                                 /* Reset for inner layer. */
7031                                 next_protocol = 0xff;
7032                         }
7033                         break;
7034                 case RTE_FLOW_ITEM_TYPE_TCP:
7035                         ret = mlx5_flow_validate_item_tcp
7036                                                 (items, item_flags,
7037                                                  next_protocol,
7038                                                  &nic_tcp_mask,
7039                                                  error);
7040                         if (ret < 0)
7041                                 return ret;
7042                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7043                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
7044                         break;
7045                 case RTE_FLOW_ITEM_TYPE_UDP:
7046                         ret = mlx5_flow_validate_item_udp(items, item_flags,
7047                                                           next_protocol,
7048                                                           error);
7049                         if (ret < 0)
7050                                 return ret;
7051                         const struct rte_flow_item_udp *spec = items->spec;
7052                         const struct rte_flow_item_udp *mask = items->mask;
7053                         if (!mask)
7054                                 mask = &rte_flow_item_udp_mask;
7055                         if (spec != NULL)
7056                                 udp_dport = rte_be_to_cpu_16
7057                                                 (spec->hdr.dst_port &
7058                                                  mask->hdr.dst_port);
7059                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7060                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
7061                         break;
7062                 case RTE_FLOW_ITEM_TYPE_GRE:
7063                         ret = mlx5_flow_validate_item_gre(items, item_flags,
7064                                                           next_protocol, error);
7065                         if (ret < 0)
7066                                 return ret;
7067                         gre_item = items;
7068                         last_item = MLX5_FLOW_LAYER_GRE;
7069                         break;
7070                 case RTE_FLOW_ITEM_TYPE_NVGRE:
7071                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
7072                                                             next_protocol,
7073                                                             error);
7074                         if (ret < 0)
7075                                 return ret;
7076                         last_item = MLX5_FLOW_LAYER_NVGRE;
7077                         break;
7078                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7079                         ret = mlx5_flow_validate_item_gre_key
7080                                 (items, item_flags, gre_item, error);
7081                         if (ret < 0)
7082                                 return ret;
7083                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
7084                         break;
7085                 case RTE_FLOW_ITEM_TYPE_VXLAN:
7086                         ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
7087                                                             items, item_flags,
7088                                                             attr, error);
7089                         if (ret < 0)
7090                                 return ret;
7091                         last_item = MLX5_FLOW_LAYER_VXLAN;
7092                         break;
7093                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7094                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
7095                                                                 item_flags, dev,
7096                                                                 error);
7097                         if (ret < 0)
7098                                 return ret;
7099                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7100                         break;
7101                 case RTE_FLOW_ITEM_TYPE_GENEVE:
7102                         ret = mlx5_flow_validate_item_geneve(items,
7103                                                              item_flags, dev,
7104                                                              error);
7105                         if (ret < 0)
7106                                 return ret;
7107                         geneve_item = items;
7108                         last_item = MLX5_FLOW_LAYER_GENEVE;
7109                         break;
7110                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
7111                         ret = mlx5_flow_validate_item_geneve_opt(items,
7112                                                                  last_item,
7113                                                                  geneve_item,
7114                                                                  dev,
7115                                                                  error);
7116                         if (ret < 0)
7117                                 return ret;
7118                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
7119                         break;
7120                 case RTE_FLOW_ITEM_TYPE_MPLS:
7121                         ret = mlx5_flow_validate_item_mpls(dev, items,
7122                                                            item_flags,
7123                                                            last_item, error);
7124                         if (ret < 0)
7125                                 return ret;
7126                         last_item = MLX5_FLOW_LAYER_MPLS;
7127                         break;
7128
7129                 case RTE_FLOW_ITEM_TYPE_MARK:
7130                         ret = flow_dv_validate_item_mark(dev, items, attr,
7131                                                          error);
7132                         if (ret < 0)
7133                                 return ret;
7134                         last_item = MLX5_FLOW_ITEM_MARK;
7135                         break;
7136                 case RTE_FLOW_ITEM_TYPE_META:
7137                         ret = flow_dv_validate_item_meta(dev, items, attr,
7138                                                          error);
7139                         if (ret < 0)
7140                                 return ret;
7141                         last_item = MLX5_FLOW_ITEM_METADATA;
7142                         break;
7143                 case RTE_FLOW_ITEM_TYPE_ICMP:
7144                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
7145                                                            next_protocol,
7146                                                            error);
7147                         if (ret < 0)
7148                                 return ret;
7149                         last_item = MLX5_FLOW_LAYER_ICMP;
7150                         break;
7151                 case RTE_FLOW_ITEM_TYPE_ICMP6:
7152                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
7153                                                             next_protocol,
7154                                                             error);
7155                         if (ret < 0)
7156                                 return ret;
7157                         item_ipv6_proto = IPPROTO_ICMPV6;
7158                         last_item = MLX5_FLOW_LAYER_ICMP6;
7159                         break;
7160                 case RTE_FLOW_ITEM_TYPE_TAG:
7161                         ret = flow_dv_validate_item_tag(dev, items,
7162                                                         attr, error);
7163                         if (ret < 0)
7164                                 return ret;
7165                         last_item = MLX5_FLOW_ITEM_TAG;
7166                         break;
7167                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7168                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7169                         break;
7170                 case RTE_FLOW_ITEM_TYPE_GTP:
7171                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7172                                                         error);
7173                         if (ret < 0)
7174                                 return ret;
7175                         gtp_item = items;
7176                         last_item = MLX5_FLOW_LAYER_GTP;
7177                         break;
7178                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7179                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
7180                                                             gtp_item, attr,
7181                                                             error);
7182                         if (ret < 0)
7183                                 return ret;
7184                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
7185                         break;
7186                 case RTE_FLOW_ITEM_TYPE_ECPRI:
7187                         /* Capacity will be checked in the translate stage. */
7188                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7189                                                             last_item,
7190                                                             ether_type,
7191                                                             &nic_ecpri_mask,
7192                                                             error);
7193                         if (ret < 0)
7194                                 return ret;
7195                         last_item = MLX5_FLOW_LAYER_ECPRI;
7196                         break;
7197                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7198                         ret = flow_dv_validate_item_integrity(dev, items,
7199                                                               item_flags,
7200                                                               &last_item,
7201                                                               integrity_items,
7202                                                               error);
7203                         if (ret < 0)
7204                                 return ret;
7205                         break;
7206                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7207                         ret = flow_dv_validate_item_aso_ct(dev, items,
7208                                                            &item_flags, error);
7209                         if (ret < 0)
7210                                 return ret;
7211                         break;
7212                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7213                         /* Tunnel offload item was processed before;
7214                          * list it here as a supported type.
7215                          */
7216                         break;
7217                 case RTE_FLOW_ITEM_TYPE_FLEX:
7218                         ret = flow_dv_validate_item_flex(dev, items, item_flags,
7219                                                          &last_item,
7220                                                          tunnel != 0, error);
7221                         if (ret < 0)
7222                                 return ret;
7223                         break;
7224                 default:
7225                         return rte_flow_error_set(error, ENOTSUP,
7226                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7227                                                   NULL, "item not supported");
7228                 }
7229                 item_flags |= last_item;
7230         }
7231         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
7232                 ret = flow_dv_validate_item_integrity_post(integrity_items,
7233                                                            item_flags, error);
7234                 if (ret)
7235                         return ret;
7236         }
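        /*
         * Illustrative sketch (editorial, not driver logic): the item loop
         * above ORs each validated layer into item_flags. Assuming the
         * standard item structs from <rte_flow.h>, a minimal pattern such as
         *
         *     struct rte_flow_item pattern[] = {
         *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
         *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
         *             { .type = RTE_FLOW_ITEM_TYPE_UDP },
         *             { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
         *             { .type = RTE_FLOW_ITEM_TYPE_END },
         *     };
         *
         * leaves item_flags holding MLX5_FLOW_LAYER_OUTER_L2 |
         * MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L4_UDP |
         * MLX5_FLOW_LAYER_VXLAN, which the action checks below rely on.
         */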
7237         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7238                 int type = actions->type;
7239                 bool shared_count = false;
7240
7241                 if (!mlx5_flow_os_action_supported(type))
7242                         return rte_flow_error_set(error, ENOTSUP,
7243                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7244                                                   actions,
7245                                                   "action not supported");
7246                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7247                         return rte_flow_error_set(error, ENOTSUP,
7248                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7249                                                   actions, "too many actions");
7250                 if (action_flags &
7251                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7252                         return rte_flow_error_set(error, ENOTSUP,
7253                                 RTE_FLOW_ERROR_TYPE_ACTION,
7254                                 NULL, "meter action with policy "
7255                                 "must be the last action");
7256                 switch (type) {
7257                 case RTE_FLOW_ACTION_TYPE_VOID:
7258                         break;
7259                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7260                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
7261                         ret = flow_dv_validate_action_port_id(dev,
7262                                                               action_flags,
7263                                                               actions,
7264                                                               attr,
7265                                                               error);
7266                         if (ret)
7267                                 return ret;
7268                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7269                         ++actions_n;
7270                         break;
7271                 case RTE_FLOW_ACTION_TYPE_FLAG:
7272                         ret = flow_dv_validate_action_flag(dev, action_flags,
7273                                                            attr, error);
7274                         if (ret < 0)
7275                                 return ret;
7276                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7277                                 /* Count all modify-header actions as one. */
7278                                 if (!(action_flags &
7279                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7280                                         ++actions_n;
7281                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7282                                                 MLX5_FLOW_ACTION_MARK_EXT;
7283                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7284                                         modify_after_mirror = 1;
7286                         } else {
7287                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7288                                 ++actions_n;
7289                         }
7290                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7291                         break;
7292                 case RTE_FLOW_ACTION_TYPE_MARK:
7293                         ret = flow_dv_validate_action_mark(dev, actions,
7294                                                            action_flags,
7295                                                            attr, error);
7296                         if (ret < 0)
7297                                 return ret;
7298                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7299                                 /* Count all modify-header actions as one. */
7300                                 if (!(action_flags &
7301                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7302                                         ++actions_n;
7303                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7304                                                 MLX5_FLOW_ACTION_MARK_EXT;
7305                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7306                                         modify_after_mirror = 1;
7307                         } else {
7308                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7309                                 ++actions_n;
7310                         }
7311                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7312                         break;
7313                 case RTE_FLOW_ACTION_TYPE_SET_META:
7314                         ret = flow_dv_validate_action_set_meta(dev, actions,
7315                                                                action_flags,
7316                                                                attr, error);
7317                         if (ret < 0)
7318                                 return ret;
7319                         /* Count all modify-header actions as one action. */
7320                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7321                                 ++actions_n;
7322                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7323                                 modify_after_mirror = 1;
7324                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7325                         rw_act_num += MLX5_ACT_NUM_SET_META;
7326                         break;
7327                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7328                         ret = flow_dv_validate_action_set_tag(dev, actions,
7329                                                               action_flags,
7330                                                               attr, error);
7331                         if (ret < 0)
7332                                 return ret;
7333                         /* Count all modify-header actions as one action. */
7334                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7335                                 ++actions_n;
7336                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7337                                 modify_after_mirror = 1;
7338                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7339                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7340                         break;
7341                 case RTE_FLOW_ACTION_TYPE_DROP:
7342                         ret = mlx5_flow_validate_action_drop(action_flags,
7343                                                              attr, error);
7344                         if (ret < 0)
7345                                 return ret;
7346                         action_flags |= MLX5_FLOW_ACTION_DROP;
7347                         ++actions_n;
7348                         break;
7349                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7350                         ret = mlx5_flow_validate_action_queue(actions,
7351                                                               action_flags, dev,
7352                                                               attr, error);
7353                         if (ret < 0)
7354                                 return ret;
7355                         queue_index = ((const struct rte_flow_action_queue *)
7356                                                         (actions->conf))->index;
7357                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7358                         ++actions_n;
7359                         break;
7360                 case RTE_FLOW_ACTION_TYPE_RSS:
7361                         rss = actions->conf;
7362                         ret = mlx5_flow_validate_action_rss(actions,
7363                                                             action_flags, dev,
7364                                                             attr, item_flags,
7365                                                             error);
7366                         if (ret < 0)
7367                                 return ret;
7368                         if (rss && sample_rss &&
7369                             (sample_rss->level != rss->level ||
7370                             sample_rss->types != rss->types))
7371                                 return rte_flow_error_set(error, ENOTSUP,
7372                                         RTE_FLOW_ERROR_TYPE_ACTION,
7373                                         NULL,
7374                                         "Cannot use different RSS types "
7375                                         "or levels in the same flow");
7376                         if (rss != NULL && rss->queue_num)
7377                                 queue_index = rss->queue[0];
7378                         action_flags |= MLX5_FLOW_ACTION_RSS;
7379                         ++actions_n;
7380                         break;
7381                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7382                         ret = mlx5_flow_validate_action_default_miss
7383                                                 (action_flags, attr,
7384                                                  error);
7385                         if (ret < 0)
7386                                 return ret;
7387                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7388                         ++actions_n;
7389                         break;
7390                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7391                         shared_count = true;
7392                         /* fall-through. */
7393                 case RTE_FLOW_ACTION_TYPE_COUNT:
7394                         ret = flow_dv_validate_action_count(dev, shared_count,
7395                                                             action_flags,
7396                                                             error);
7397                         if (ret < 0)
7398                                 return ret;
7399                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7400                         ++actions_n;
7401                         break;
7402                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7403                         if (flow_dv_validate_action_pop_vlan(dev,
7404                                                              action_flags,
7405                                                              actions,
7406                                                              item_flags, attr,
7407                                                              error))
7408                                 return -rte_errno;
7409                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7410                                 modify_after_mirror = 1;
7411                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7412                         ++actions_n;
7413                         break;
7414                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7415                         ret = flow_dv_validate_action_push_vlan(dev,
7416                                                                 action_flags,
7417                                                                 vlan_m,
7418                                                                 actions, attr,
7419                                                                 error);
7420                         if (ret < 0)
7421                                 return ret;
7422                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7423                                 modify_after_mirror = 1;
7424                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7425                         ++actions_n;
7426                         break;
7427                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7428                         ret = flow_dv_validate_action_set_vlan_pcp
7429                                                 (action_flags, actions, error);
7430                         if (ret < 0)
7431                                 return ret;
7432                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7433                                 modify_after_mirror = 1;
7434                         /* Count PCP with push_vlan command. */
7435                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7436                         break;
7437                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7438                         ret = flow_dv_validate_action_set_vlan_vid
7439                                                 (item_flags, action_flags,
7440                                                  actions, error);
7441                         if (ret < 0)
7442                                 return ret;
7443                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7444                                 modify_after_mirror = 1;
7445                         /* Count VID with push_vlan command. */
7446                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7447                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7448                         break;
7449                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7450                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7451                         ret = flow_dv_validate_action_l2_encap(dev,
7452                                                                action_flags,
7453                                                                actions, attr,
7454                                                                error);
7455                         if (ret < 0)
7456                                 return ret;
7457                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7458                         ++actions_n;
7459                         break;
7460                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7461                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7462                         ret = flow_dv_validate_action_decap(dev, action_flags,
7463                                                             actions, item_flags,
7464                                                             attr, error);
7465                         if (ret < 0)
7466                                 return ret;
7467                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7468                                 modify_after_mirror = 1;
7469                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7470                         ++actions_n;
7471                         break;
7472                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7473                         ret = flow_dv_validate_action_raw_encap_decap
7474                                 (dev, NULL, actions->conf, attr, &action_flags,
7475                                  &actions_n, actions, item_flags, error);
7476                         if (ret < 0)
7477                                 return ret;
7478                         break;
7479                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7480                         decap = actions->conf;
7481                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7482                                 ;
7483                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7484                                 encap = NULL;
7485                                 actions--;
7486                         } else {
7487                                 encap = actions->conf;
7488                         }
7489                         ret = flow_dv_validate_action_raw_encap_decap
7490                                            (dev,
7491                                             decap ? decap : &empty_decap, encap,
7492                                             attr, &action_flags, &actions_n,
7493                                             actions, item_flags, error);
7494                         if (ret < 0)
7495                                 return ret;
7496                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7497                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7498                                 modify_after_mirror = 1;
7499                         break;
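                /*
                 * Illustrative sketch (editorial, not driver logic): the
                 * look-ahead above pairs a RAW_DECAP with the next non-VOID
                 * action when that action is a RAW_ENCAP, so the two are
                 * validated together as one L3 decap/encap combination. A
                 * caller-side sketch (decap_conf/encap_conf are placeholder
                 * names, contents omitted):
                 *
                 *     struct rte_flow_action actions[] = {
                 *             { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP,
                 *               .conf = &decap_conf },
                 *             { .type = RTE_FLOW_ACTION_TYPE_VOID },
                 *             { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
                 *               .conf = &encap_conf },
                 *             { .type = RTE_FLOW_ACTION_TYPE_END },
                 *     };
                 *
                 * If the RAW_DECAP carries no configuration, &empty_decap is
                 * used in its place; if no RAW_ENCAP follows, encap stays
                 * NULL.
                 */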
7500                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7501                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7502                         ret = flow_dv_validate_action_modify_mac(action_flags,
7503                                                                  actions,
7504                                                                  item_flags,
7505                                                                  error);
7506                         if (ret < 0)
7507                                 return ret;
7508                         /* Count all modify-header actions as one action. */
7509                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7510                                 ++actions_n;
7511                         action_flags |= actions->type ==
7512                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7513                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7514                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7515                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7516                                 modify_after_mirror = 1;
7517                         /*
7518                          * Even though the source and destination MAC addresses
7519                          * overlap in the 4B-aligned header fields, the convert
7520                          * function handles them separately, so 4 SW actions are
7521                          * created, and 2 actions are added each time no matter
7522                          * how many bytes of the address are actually set.
7523                          */
7524                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7525                         break;
7526                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7527                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7528                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7529                                                                   actions,
7530                                                                   item_flags,
7531                                                                   error);
7532                         if (ret < 0)
7533                                 return ret;
7534                         /* Count all modify-header actions as one action. */
7535                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7536                                 ++actions_n;
7537                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7538                                 modify_after_mirror = 1;
7539                         action_flags |= actions->type ==
7540                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7541                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7542                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7543                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7544                         break;
7545                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7546                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7547                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7548                                                                   actions,
7549                                                                   item_flags,
7550                                                                   error);
7551                         if (ret < 0)
7552                                 return ret;
7553                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7554                                 return rte_flow_error_set(error, ENOTSUP,
7555                                         RTE_FLOW_ERROR_TYPE_ACTION,
7556                                         actions,
7557                                         "Can't change header "
7558                                         "with ICMPv6 proto");
7559                         /* Count all modify-header actions as one action. */
7560                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7561                                 ++actions_n;
7562                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7563                                 modify_after_mirror = 1;
7564                         action_flags |= actions->type ==
7565                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7566                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7567                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7568                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7569                         break;
7570                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7571                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7572                         ret = flow_dv_validate_action_modify_tp(action_flags,
7573                                                                 actions,
7574                                                                 item_flags,
7575                                                                 error);
7576                         if (ret < 0)
7577                                 return ret;
7578                         /* Count all modify-header actions as one action. */
7579                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7580                                 ++actions_n;
7581                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7582                                 modify_after_mirror = 1;
7583                         action_flags |= actions->type ==
7584                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7585                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7586                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7587                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7588                         break;
7589                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7590                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7591                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7592                                                                  actions,
7593                                                                  item_flags,
7594                                                                  error);
7595                         if (ret < 0)
7596                                 return ret;
7597                         /* Count all modify-header actions as one action. */
7598                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7599                                 ++actions_n;
7600                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7601                                 modify_after_mirror = 1;
7602                         action_flags |= actions->type ==
7603                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7604                                                 MLX5_FLOW_ACTION_SET_TTL :
7605                                                 MLX5_FLOW_ACTION_DEC_TTL;
7606                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7607                         break;
7608                 case RTE_FLOW_ACTION_TYPE_JUMP:
7609                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7610                                                            action_flags,
7611                                                            attr, external,
7612                                                            error);
7613                         if (ret)
7614                                 return ret;
7615                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7616                             fdb_mirror_limit)
7617                                 return rte_flow_error_set(error, EINVAL,
7618                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7619                                                   NULL,
7620                                                   "sample and jump action combination is not supported");
7621                         ++actions_n;
7622                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7623                         break;
7624                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7625                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7626                         ret = flow_dv_validate_action_modify_tcp_seq
7627                                                                 (action_flags,
7628                                                                  actions,
7629                                                                  item_flags,
7630                                                                  error);
7631                         if (ret < 0)
7632                                 return ret;
7633                         /* Count all modify-header actions as one action. */
7634                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7635                                 ++actions_n;
7636                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7637                                 modify_after_mirror = 1;
7638                         action_flags |= actions->type ==
7639                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7640                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7641                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7642                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7643                         break;
7644                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7645                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7646                         ret = flow_dv_validate_action_modify_tcp_ack
7647                                                                 (action_flags,
7648                                                                  actions,
7649                                                                  item_flags,
7650                                                                  error);
7651                         if (ret < 0)
7652                                 return ret;
7653                         /* Count all modify-header actions as one action. */
7654                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7655                                 ++actions_n;
7656                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7657                                 modify_after_mirror = 1;
7658                         action_flags |= actions->type ==
7659                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7660                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7661                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7662                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7663                         break;
7664                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7665                         break;
7666                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7667                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7668                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7669                         break;
7670                 case RTE_FLOW_ACTION_TYPE_METER:
7671                         ret = mlx5_flow_validate_action_meter(dev,
7672                                                               action_flags,
7673                                                               actions, attr,
7674                                                               port_id_item,
7675                                                               &def_policy,
7676                                                               error);
7677                         if (ret < 0)
7678                                 return ret;
7679                         action_flags |= MLX5_FLOW_ACTION_METER;
7680                         if (!def_policy)
7681                                 action_flags |=
7682                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7683                         ++actions_n;
7684                         /* Meter action will add one more TAG action. */
7685                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7686                         break;
7687                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7688                         if (!attr->transfer && !attr->group)
7689                                 return rte_flow_error_set
7690                                         (error, ENOTSUP,
7691                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7692                                          "Shared ASO age action is not supported for group 0");
7693                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7694                                 return rte_flow_error_set
7695                                                   (error, EINVAL,
7696                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7697                                                    NULL,
7698                                                    "duplicate age actions set");
7699                         action_flags |= MLX5_FLOW_ACTION_AGE;
7700                         ++actions_n;
7701                         break;
7702                 case RTE_FLOW_ACTION_TYPE_AGE:
7703                         ret = flow_dv_validate_action_age(action_flags,
7704                                                           actions, dev,
7705                                                           error);
7706                         if (ret < 0)
7707                                 return ret;
7708                         /*
7709                          * Validate the regular AGE action (using a counter)
7710                          * for mutual exclusion with shared counter actions.
7711                          */
7712                         if (!priv->sh->flow_hit_aso_en) {
7713                                 if (shared_count)
7714                                         return rte_flow_error_set
7715                                                 (error, EINVAL,
7716                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7717                                                 NULL,
7718                                                 "old age and shared count combination is not supported");
7719                                 if (sample_count)
7720                                         return rte_flow_error_set
7721                                                 (error, EINVAL,
7722                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7723                                                 NULL,
7724                                                 "old age action and count must be in the same sub flow");
7725                         }
7726                         action_flags |= MLX5_FLOW_ACTION_AGE;
7727                         ++actions_n;
7728                         break;
7729                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7730                         ret = flow_dv_validate_action_modify_ipv4_dscp
7731                                                          (action_flags,
7732                                                           actions,
7733                                                           item_flags,
7734                                                           error);
7735                         if (ret < 0)
7736                                 return ret;
7737                         /* Count all modify-header actions as one action. */
7738                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7739                                 ++actions_n;
7740                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7741                                 modify_after_mirror = 1;
7742                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7743                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7744                         break;
7745                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7746                         ret = flow_dv_validate_action_modify_ipv6_dscp
7747                                                                 (action_flags,
7748                                                                  actions,
7749                                                                  item_flags,
7750                                                                  error);
7751                         if (ret < 0)
7752                                 return ret;
7753                         /* Count all modify-header actions as one action. */
7754                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7755                                 ++actions_n;
7756                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7757                                 modify_after_mirror = 1;
7758                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7759                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7760                         break;
7761                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7762                         ret = flow_dv_validate_action_sample(&action_flags,
7763                                                              actions, dev,
7764                                                              attr, item_flags,
7765                                                              rss, &sample_rss,
7766                                                              &sample_count,
7767                                                              &fdb_mirror_limit,
7768                                                              error);
7769                         if (ret < 0)
7770                                 return ret;
7771                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7772                         ++actions_n;
7773                         break;
7774                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7775                         ret = flow_dv_validate_action_modify_field(dev,
7776                                                                    action_flags,
7777                                                                    actions,
7778                                                                    attr,
7779                                                                    error);
7780                         if (ret < 0)
7781                                 return ret;
7782                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7783                                 modify_after_mirror = 1;
7784                         /* Count all modify-header actions as one action. */
7785                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7786                                 ++actions_n;
7787                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7788                         rw_act_num += ret;
7789                         break;
7790                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7791                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7792                                                              item_flags, attr,
7793                                                              error);
7794                         if (ret < 0)
7795                                 return ret;
7796                         action_flags |= MLX5_FLOW_ACTION_CT;
7797                         break;
7798                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7799                         /* Tunnel offload action was processed before;
7800                          * list it here as a supported type.
7801                          */
7802                         break;
7803                 default:
7804                         return rte_flow_error_set(error, ENOTSUP,
7805                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7806                                                   actions,
7807                                                   "action not supported");
7808                 }
7809         }
7810         /*
7811          * Validate actions in tunnel offload flow rules:
7812          * - Explicit decap action is prohibited by the tunnel offload API.
7813          * - Drop action in a tunnel steer rule is prohibited by the API.
7814          * - Application cannot use MARK action because its value can mask
7815          *   the tunnel default miss notification.
7816          * - JUMP in a tunnel match rule is not supported by the current
7817          *   PMD implementation.
7818          * - TAG & META are reserved for future use.
7819          */
7820         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7821                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7822                                             MLX5_FLOW_ACTION_MARK     |
7823                                             MLX5_FLOW_ACTION_SET_TAG  |
7824                                             MLX5_FLOW_ACTION_SET_META |
7825                                             MLX5_FLOW_ACTION_DROP;
7826
7827                 if (action_flags & bad_actions_mask)
7828                         return rte_flow_error_set
7829                                         (error, EINVAL,
7830                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7831                                         "Invalid RTE action in tunnel "
7832                                         "set decap rule");
7833                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7834                         return rte_flow_error_set
7835                                         (error, EINVAL,
7836                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7837                                         "tunnel set decap rule must terminate "
7838                                         "with JUMP");
7839                 if (!attr->ingress)
7840                         return rte_flow_error_set
7841                                         (error, EINVAL,
7842                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7843                                         "tunnel flows for ingress traffic only");
7844         }
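        /*
         * Illustrative sketch (editorial, not driver logic): a tunnel set
         * (steer) rule that satisfies the checks above only classifies and
         * jumps, e.g. with attr->ingress == 1 and a jump target group
         * prepared elsewhere (jump_conf is a placeholder name):
         *
         *     struct rte_flow_action actions[] = {
         *             { .type = RTE_FLOW_ACTION_TYPE_JUMP,
         *               .conf = &jump_conf },
         *             { .type = RTE_FLOW_ACTION_TYPE_END },
         *     };
         *
         * Adding DECAP, MARK, SET_TAG, SET_META or DROP to such a rule is
         * rejected by the bad_actions_mask test.
         */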
7845         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7846                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7847                                             MLX5_FLOW_ACTION_MARK    |
7848                                             MLX5_FLOW_ACTION_SET_TAG |
7849                                             MLX5_FLOW_ACTION_SET_META;
7850
7851                 if (action_flags & bad_actions_mask)
7852                         return rte_flow_error_set
7853                                         (error, EINVAL,
7854                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7855                                         "Invalid RTE action in tunnel "
7856                                         "set match rule");
7857         }
7858         /*
7859          * Validate the drop action mutual exclusion with other actions.
7860          * Drop action is mutually-exclusive with any other action, except for
7861          * Count action.
7862          * Drop action compatibility with tunnel offload was already validated.
7863          */
7864         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7865                             MLX5_FLOW_ACTION_TUNNEL_SET));
7866         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7867             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7868                 return rte_flow_error_set(error, EINVAL,
7869                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7870                                           "Drop action is mutually-exclusive "
7871                                           "with any other action, except for "
7872                                           "Count action");
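        /*
         * Illustrative sketch (editorial, not driver logic): per the check
         * above, DROP may only be combined with COUNT, e.g.:
         *
         *     struct rte_flow_action ok[] = {
         *             { .type = RTE_FLOW_ACTION_TYPE_COUNT },
         *             { .type = RTE_FLOW_ACTION_TYPE_DROP },
         *             { .type = RTE_FLOW_ACTION_TYPE_END },
         *     };
         *
         * while adding e.g. QUEUE to the same list fails validation; tunnel
         * offload rules were already checked and are skipped by the empty
         * first branch.
         */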
7873         /* Eswitch has a few restrictions on using items and actions. */
7874         if (attr->transfer) {
7875                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7876                     action_flags & MLX5_FLOW_ACTION_FLAG)
7877                         return rte_flow_error_set(error, ENOTSUP,
7878                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7879                                                   NULL,
7880                                                   "unsupported action FLAG");
7881                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7882                     action_flags & MLX5_FLOW_ACTION_MARK)
7883                         return rte_flow_error_set(error, ENOTSUP,
7884                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7885                                                   NULL,
7886                                                   "unsupported action MARK");
7887                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7888                         return rte_flow_error_set(error, ENOTSUP,
7889                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7890                                                   NULL,
7891                                                   "unsupported action QUEUE");
7892                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7893                         return rte_flow_error_set(error, ENOTSUP,
7894                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7895                                                   NULL,
7896                                                   "unsupported action RSS");
7897                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7898                         return rte_flow_error_set(error, EINVAL,
7899                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7900                                                   actions,
7901                                                   "no fate action is found");
7902         } else {
7903                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7904                         return rte_flow_error_set(error, EINVAL,
7905                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7906                                                   actions,
7907                                                   "no fate action is found");
7908         }
7909         /*
7910          * Continue validation for Xcap and VLAN actions.
7911          * If hairpin works in explicit TX rule mode, there is no action
7912          * splitting, and the validation of a hairpin ingress flow should
7913          * be the same as for other standard flows.
7914          */
7915         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7916                              MLX5_FLOW_VLAN_ACTIONS)) &&
7917             (queue_index == 0xFFFF ||
7918              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7919              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7920              conf->tx_explicit != 0))) {
7921                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7922                     MLX5_FLOW_XCAP_ACTIONS)
7923                         return rte_flow_error_set(error, ENOTSUP,
7924                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7925                                                   NULL, "encap and decap "
7926                                                   "combination is not supported");
7927                 if (!attr->transfer && attr->ingress) {
7928                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7929                                 return rte_flow_error_set
7930                                                 (error, ENOTSUP,
7931                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7932                                                  NULL, "encap is not supported"
7933                                                  " for ingress traffic");
7934                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7935                                 return rte_flow_error_set
7936                                                 (error, ENOTSUP,
7937                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7938                                                  NULL, "push VLAN action not "
7939                                                  "supported for ingress");
7940                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7941                                         MLX5_FLOW_VLAN_ACTIONS)
7942                                 return rte_flow_error_set
7943                                                 (error, ENOTSUP,
7944                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7945                                                  NULL, "no support for "
7946                                                  "multiple VLAN actions");
7947                 }
7948         }
7949         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7950                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7951                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7952                         attr->ingress)
7953                         return rte_flow_error_set
7954                                 (error, ENOTSUP,
7955                                 RTE_FLOW_ERROR_TYPE_ACTION,
7956                                 NULL, "fate action not supported for "
7957                                 "meter with policy");
7958                 if (attr->egress) {
7959                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7960                                 return rte_flow_error_set
7961                                         (error, ENOTSUP,
7962                                         RTE_FLOW_ERROR_TYPE_ACTION,
7963                                         NULL, "modify header action in egress "
7964                                         "cannot be done before meter action");
7965                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7966                                 return rte_flow_error_set
7967                                         (error, ENOTSUP,
7968                                         RTE_FLOW_ERROR_TYPE_ACTION,
7969                                         NULL, "encap action in egress "
7970                                         "cannot be done before meter action");
7971                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7972                                 return rte_flow_error_set
7973                                         (error, ENOTSUP,
7974                                         RTE_FLOW_ERROR_TYPE_ACTION,
7975                                         NULL, "push vlan action in egress "
7976                                         "cannot be done before meter action");
7977                 }
7978         }
7979         /*
7980          * Hairpin flow will add one more TAG action in TX implicit mode.
7981          * In TX explicit mode, there will be no hairpin flow ID.
7982          */
7983         if (hairpin > 0)
7984                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7985         /* Extra metadata enabled: one more TAG action will be added. */
7986         if (dev_conf->dv_flow_en &&
7987             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7988             mlx5_flow_ext_mreg_supported(dev))
7989                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7990         if (rw_act_num >
7991                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7992                 return rte_flow_error_set(error, ENOTSUP,
7993                                           RTE_FLOW_ERROR_TYPE_ACTION,
7994                                           NULL, "too many modify-header"
7995                                           " actions to support");
7996         }
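        /*
         * Worked example (editorial, derived from the counts above):
         * rw_act_num counts modify-header sub-actions rather than rte_flow
         * actions. A rule with SET_IPV4_SRC, SET_TP_SRC and SET_TTL needs
         *
         *     MLX5_ACT_NUM_MDF_IPV4 + MLX5_ACT_NUM_MDF_PORT +
         *     MLX5_ACT_NUM_MDF_TTL
         *
         * sub-actions, plus MLX5_ACT_NUM_SET_TAG for each implicit TAG
         * (hairpin TX implicit mode, extended metadata), and the sum must
         * not exceed flow_dv_modify_hdr_action_max(dev, is_root).
         */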
7997         /* Eswitch egress mirror and modify flow has a limitation on CX5. */
7998         if (fdb_mirror_limit && modify_after_mirror)
7999                 return rte_flow_error_set(error, EINVAL,
8000                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8001                                 "sample before modify action is not supported");
8002         return 0;
8003 }
8004
8005 /**
8006  * Internal preparation function. Allocates the DV flow object,
8007  * whose size is constant.
8008  *
8009  * @param[in] dev
8010  *   Pointer to the rte_eth_dev structure.
8011  * @param[in] attr
8012  *   Pointer to the flow attributes.
8013  * @param[in] items
8014  *   Pointer to the list of items.
8015  * @param[in] actions
8016  *   Pointer to the list of actions.
8017  * @param[out] error
8018  *   Pointer to the error structure.
8019  *
8020  * @return
8021  *   Pointer to mlx5_flow object on success,
8022  *   otherwise NULL and rte_errno is set.
8023  */
8024 static struct mlx5_flow *
8025 flow_dv_prepare(struct rte_eth_dev *dev,
8026                 const struct rte_flow_attr *attr __rte_unused,
8027                 const struct rte_flow_item items[] __rte_unused,
8028                 const struct rte_flow_action actions[] __rte_unused,
8029                 struct rte_flow_error *error)
8030 {
8031         uint32_t handle_idx = 0;
8032         struct mlx5_flow *dev_flow;
8033         struct mlx5_flow_handle *dev_handle;
8034         struct mlx5_priv *priv = dev->data->dev_private;
8035         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8036
8037         MLX5_ASSERT(wks);
8038         wks->skip_matcher_reg = 0;
8039         wks->policy = NULL;
8040         wks->final_policy = NULL;
8041         /* Check the index to avoid corrupting the workspace memory. */
8042         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
8043                 rte_flow_error_set(error, ENOSPC,
8044                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8045                                    "no free temporary device flow");
8046                 return NULL;
8047         }
8048         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
8049                                    &handle_idx);
8050         if (!dev_handle) {
8051                 rte_flow_error_set(error, ENOMEM,
8052                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8053                                    "not enough memory to create flow handle");
8054                 return NULL;
8055         }
8056         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
8057         dev_flow = &wks->flows[wks->flow_idx++];
8058         memset(dev_flow, 0, sizeof(*dev_flow));
8059         dev_flow->handle = dev_handle;
8060         dev_flow->handle_idx = handle_idx;
8061         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
8062         dev_flow->ingress = attr->ingress;
8063         dev_flow->dv.transfer = attr->transfer;
8064         return dev_flow;
8065 }
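/*
 * Editorial note (partly an assumption, not stated in the code above):
 * flow_dv_prepare() hands out one of the per-thread workspace's
 * pre-allocated mlx5_flow slots (wks->flows[]) together with an
 * ipool-allocated handle. Only the slot index is advanced here; the
 * flow-creation path is assumed to reset wks->flow_idx between rte_flow
 * operations, so at most MLX5_NUM_MAX_DEV_FLOWS device sub-flows can be
 * prepared per flow.
 */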
8066
8067 #ifdef RTE_LIBRTE_MLX5_DEBUG
8068 /**
8069  * Sanity check for match mask and value. Similar to check_valid_spec() in
8070  * the kernel driver. If an unmasked bit is set in the value, it fails.
8071  *
8072  * @param match_mask
8073  *   pointer to match mask buffer.
8074  * @param match_value
8075  *   pointer to match value buffer.
8076  *
8077  * @return
8078  *   0 if valid, -EINVAL otherwise.
8079  */
8080 static int
8081 flow_dv_check_valid_spec(void *match_mask, void *match_value)
8082 {
8083         uint8_t *m = match_mask;
8084         uint8_t *v = match_value;
8085         unsigned int i;
8086
8087         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
8088                 if (v[i] & ~m[i]) {
8089                         DRV_LOG(ERR,
8090                                 "match_value differs from match_criteria"
8091                                 " %p[%u] != %p[%u]",
8092                                 match_value, i, match_mask, i);
8093                         return -EINVAL;
8094                 }
8095         }
8096         return 0;
8097 }
8098 #endif
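
/*
 * Example: with mask byte m[i] = 0xf0 and value byte v[i] = 0x1f,
 * v[i] & ~m[i] = 0x0f != 0, so flow_dv_check_valid_spec() fails;
 * value bits are set where the mask declares don't-care.
 */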
8099
8100 /**
8101  * Add match of ip_version.
8102  *
8103  * @param[in] group
8104  *   Flow group.
8105  * @param[in] headers_v
8106  *   Values header pointer.
8107  * @param[in] headers_m
8108  *   Masks header pointer.
8109  * @param[in] ip_version
8110  *   The IP version to set.
8111  */
8112 static inline void
8113 flow_dv_set_match_ip_version(uint32_t group,
8114                              void *headers_v,
8115                              void *headers_m,
8116                              uint8_t ip_version)
8117 {
8118         if (group == 0)
8119                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8120         else
8121                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8122                          ip_version);
8123         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8124         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8125         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8126 }
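
/*
 * E.g. for ip_version 4: a root table (group 0) rule programs mask 0xf
 * with value 4 (an exact match on the full field), while non-root
 * tables use the version value itself as the mask. In both cases the
 * ethertype match is cleared so ip_version is the only L3 classifier.
 */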
8127
8128 /**
8129  * Add Ethernet item to matcher and to the value.
8130  *
8131  * @param[in, out] matcher
8132  *   Flow matcher.
8133  * @param[in, out] key
8134  *   Flow matcher value.
8135  * @param[in] item
8136  *   Flow pattern to translate.
8137  * @param[in] inner
8138  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
8140 static void
8141 flow_dv_translate_item_eth(void *matcher, void *key,
8142                            const struct rte_flow_item *item, int inner,
8143                            uint32_t group)
8144 {
8145         const struct rte_flow_item_eth *eth_m = item->mask;
8146         const struct rte_flow_item_eth *eth_v = item->spec;
8147         const struct rte_flow_item_eth nic_mask = {
8148                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8149                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8150                 .type = RTE_BE16(0xffff),
8151                 .has_vlan = 0,
8152         };
8153         void *hdrs_m;
8154         void *hdrs_v;
8155         char *l24_v;
8156         unsigned int i;
8157
8158         if (!eth_v)
8159                 return;
8160         if (!eth_m)
8161                 eth_m = &nic_mask;
8162         if (inner) {
8163                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8164                                          inner_headers);
8165                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8166         } else {
8167                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8168                                          outer_headers);
8169                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8170         }
8171         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8172                &eth_m->dst, sizeof(eth_m->dst));
8173         /* The value must be in the range of the mask. */
8174         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8175         for (i = 0; i < sizeof(eth_m->dst); ++i)
8176                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8177         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8178                &eth_m->src, sizeof(eth_m->src));
8179         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8180         /* The value must be in the range of the mask. */
        for (i = 0; i < sizeof(eth_m->src); ++i)
8182                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8183         /*
8184          * HW supports match on one Ethertype, the Ethertype following the last
8185          * VLAN tag of the packet (see PRM).
8186          * Set match on ethertype only if ETH header is not followed by VLAN.
8187          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8188          * ethertype, and use ip_version field instead.
8189          * eCPRI over Ether layer will use type value 0xAEFE.
8190          */
8191         if (eth_m->type == 0xFFFF) {
                /* Set cvlan_tag mask for any single/multi/un-tagged case. */
8193                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8194                 switch (eth_v->type) {
8195                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8196                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8197                         return;
8198                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8199                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8200                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8201                         return;
8202                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8203                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8204                         return;
8205                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8206                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8207                         return;
8208                 default:
8209                         break;
8210                 }
8211         }
8212         if (eth_m->has_vlan) {
8213                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8214                 if (eth_v->has_vlan) {
8215                         /*
8216                          * Here, when also has_more_vlan field in VLAN item is
8217                          * not set, only single-tagged packets will be matched.
8218                          */
8219                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8220                         return;
8221                 }
8222         }
8223         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8224                  rte_be_to_cpu_16(eth_m->type));
8225         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8226         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8227 }
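
/*
 * Example: an ETH item with a fully-masked type and spec 0x0800 (IPv4)
 * sets the cvlan_tag mask with value 0 (untagged) and ip_version 4
 * instead of an ethertype match, following the IPv4/IPv6 optimization
 * described above.
 */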
8228
8229 /**
8230  * Add VLAN item to matcher and to the value.
8231  *
8232  * @param[in, out] dev_flow
8233  *   Flow descriptor.
8234  * @param[in, out] matcher
8235  *   Flow matcher.
8236  * @param[in, out] key
8237  *   Flow matcher value.
8238  * @param[in] item
8239  *   Flow pattern to translate.
8240  * @param[in] inner
8241  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
8243 static void
8244 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8245                             void *matcher, void *key,
8246                             const struct rte_flow_item *item,
8247                             int inner, uint32_t group)
8248 {
8249         const struct rte_flow_item_vlan *vlan_m = item->mask;
8250         const struct rte_flow_item_vlan *vlan_v = item->spec;
8251         void *hdrs_m;
8252         void *hdrs_v;
8253         uint16_t tci_m;
8254         uint16_t tci_v;
8255
8256         if (inner) {
8257                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8258                                          inner_headers);
8259                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8260         } else {
8261                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8262                                          outer_headers);
8263                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8264                 /*
                 * This is a workaround: VLAN masks are not supported
                 * here and have been pre-validated.
8267                  */
8268                 if (vlan_v)
8269                         dev_flow->handle->vf_vlan.tag =
8270                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8271         }
8272         /*
8273          * When VLAN item exists in flow, mark packet as tagged,
8274          * even if TCI is not specified.
8275          */
8276         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8277                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8278                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8279         }
8280         if (!vlan_v)
8281                 return;
8282         if (!vlan_m)
8283                 vlan_m = &rte_flow_item_vlan_mask;
8284         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8285         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8286         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8287         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8288         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8289         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8290         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8291         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8292         /*
8293          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8294          * ethertype, and use ip_version field instead.
8295          */
8296         if (vlan_m->inner_type == 0xFFFF) {
8297                 switch (vlan_v->inner_type) {
8298                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8299                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8300                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8301                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8302                         return;
8303                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8304                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8305                         return;
8306                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8307                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8308                         return;
8309                 default:
8310                         break;
8311                 }
8312         }
8313         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8314                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8315                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8316                 /* Only one vlan_tag bit can be set. */
8317                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8318                 return;
8319         }
8320         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8321                  rte_be_to_cpu_16(vlan_m->inner_type));
8322         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8323                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8324 }
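
/*
 * TCI decomposition example: tci = 0xe00a yields first_prio =
 * 0xe00a >> 13 = 7, first_cfi = (0xe00a >> 12) & 1 = 0 and first_vid =
 * 0xe00a & 0xfff = 0x00a; MLX5_SET() truncates each value to its field
 * width.
 */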
8325
8326 /**
8327  * Add IPV4 item to matcher and to the value.
8328  *
8329  * @param[in, out] matcher
8330  *   Flow matcher.
8331  * @param[in, out] key
8332  *   Flow matcher value.
8333  * @param[in] item
8334  *   Flow pattern to translate.
8335  * @param[in] inner
8336  *   Item is inner pattern.
8337  * @param[in] group
8338  *   The group to insert the rule.
8339  */
8340 static void
8341 flow_dv_translate_item_ipv4(void *matcher, void *key,
8342                             const struct rte_flow_item *item,
8343                             int inner, uint32_t group)
8344 {
8345         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8346         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8347         const struct rte_flow_item_ipv4 nic_mask = {
8348                 .hdr = {
8349                         .src_addr = RTE_BE32(0xffffffff),
8350                         .dst_addr = RTE_BE32(0xffffffff),
8351                         .type_of_service = 0xff,
8352                         .next_proto_id = 0xff,
8353                         .time_to_live = 0xff,
8354                 },
8355         };
8356         void *headers_m;
8357         void *headers_v;
8358         char *l24_m;
8359         char *l24_v;
8360         uint8_t tos, ihl_m, ihl_v;
8361
8362         if (inner) {
8363                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8364                                          inner_headers);
8365                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8366         } else {
8367                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8368                                          outer_headers);
8369                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8370         }
8371         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8372         if (!ipv4_v)
8373                 return;
8374         if (!ipv4_m)
8375                 ipv4_m = &nic_mask;
8376         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8377                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8378         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8379                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8380         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8381         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8382         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8383                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8384         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8385                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8386         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8387         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8388         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8389         ihl_m = ipv4_m->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8390         ihl_v = ipv4_v->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8391         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_ihl, ihl_m);
8392         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, ihl_m & ihl_v);
8393         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8394                  ipv4_m->hdr.type_of_service);
8395         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8396         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8397                  ipv4_m->hdr.type_of_service >> 2);
8398         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8399         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8400                  ipv4_m->hdr.next_proto_id);
8401         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8402                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8403         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8404                  ipv4_m->hdr.time_to_live);
8405         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8406                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8407         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8408                  !!(ipv4_m->hdr.fragment_offset));
8409         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8410                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8411 }
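
/*
 * TOS example: type_of_service = 0xb8 (DSCP 46 "EF", ECN 0) with a full
 * mask programs ip_dscp = 0xb8 >> 2 = 46 and ip_ecn = 0xb8 & 0x3 = 0.
 */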
8412
8413 /**
8414  * Add IPV6 item to matcher and to the value.
8415  *
8416  * @param[in, out] matcher
8417  *   Flow matcher.
8418  * @param[in, out] key
8419  *   Flow matcher value.
8420  * @param[in] item
8421  *   Flow pattern to translate.
8422  * @param[in] inner
8423  *   Item is inner pattern.
8424  * @param[in] group
8425  *   The group to insert the rule.
8426  */
8427 static void
8428 flow_dv_translate_item_ipv6(void *matcher, void *key,
8429                             const struct rte_flow_item *item,
8430                             int inner, uint32_t group)
8431 {
8432         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8433         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8434         const struct rte_flow_item_ipv6 nic_mask = {
8435                 .hdr = {
8436                         .src_addr =
8437                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8438                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8439                         .dst_addr =
8440                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8441                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8442                         .vtc_flow = RTE_BE32(0xffffffff),
8443                         .proto = 0xff,
8444                         .hop_limits = 0xff,
8445                 },
8446         };
8447         void *headers_m;
8448         void *headers_v;
8449         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8450         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8451         char *l24_m;
8452         char *l24_v;
8453         uint32_t vtc_m;
8454         uint32_t vtc_v;
8455         int i;
8456         int size;
8457
8458         if (inner) {
8459                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8460                                          inner_headers);
8461                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8462         } else {
8463                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8464                                          outer_headers);
8465                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8466         }
8467         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8468         if (!ipv6_v)
8469                 return;
8470         if (!ipv6_m)
8471                 ipv6_m = &nic_mask;
8472         size = sizeof(ipv6_m->hdr.dst_addr);
8473         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8474                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8475         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8476                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8477         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8478         for (i = 0; i < size; ++i)
8479                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8480         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8481                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8482         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8483                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8484         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8485         for (i = 0; i < size; ++i)
8486                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8487         /* TOS. */
8488         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8489         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8490         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8491         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8492         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8493         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8494         /* Label. */
8495         if (inner) {
8496                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8497                          vtc_m);
8498                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8499                          vtc_v);
8500         } else {
8501                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8502                          vtc_m);
8503                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8504                          vtc_v);
8505         }
8506         /* Protocol. */
8507         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8508                  ipv6_m->hdr.proto);
8509         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8510                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8511         /* Hop limit. */
8512         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8513                  ipv6_m->hdr.hop_limits);
8514         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8515                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8516         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8517                  !!(ipv6_m->has_frag_ext));
8518         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8519                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8520 }
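
/*
 * vtc_flow example: 0x6b8abcde (version 6, traffic class 0xb8, flow
 * label 0xabcde) with a full mask yields ip_dscp = (vtc >> 22) & 0x3f
 * = 46, ip_ecn = (vtc >> 20) & 0x3 = 0 and a 20-bit flow label of
 * 0xabcde in the misc parameters.
 */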
8521
8522 /**
8523  * Add IPV6 fragment extension item to matcher and to the value.
8524  *
8525  * @param[in, out] matcher
8526  *   Flow matcher.
8527  * @param[in, out] key
8528  *   Flow matcher value.
8529  * @param[in] item
8530  *   Flow pattern to translate.
8531  * @param[in] inner
8532  *   Item is inner pattern.
8533  */
8534 static void
8535 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8536                                      const struct rte_flow_item *item,
8537                                      int inner)
8538 {
8539         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8540         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8541         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8542                 .hdr = {
8543                         .next_header = 0xff,
8544                         .frag_data = RTE_BE16(0xffff),
8545                 },
8546         };
8547         void *headers_m;
8548         void *headers_v;
8549
8550         if (inner) {
8551                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8552                                          inner_headers);
8553                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8554         } else {
8555                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8556                                          outer_headers);
8557                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8558         }
8559         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8560         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8561         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8562         if (!ipv6_frag_ext_v)
8563                 return;
8564         if (!ipv6_frag_ext_m)
8565                 ipv6_frag_ext_m = &nic_mask;
8566         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8567                  ipv6_frag_ext_m->hdr.next_header);
8568         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8569                  ipv6_frag_ext_v->hdr.next_header &
8570                  ipv6_frag_ext_m->hdr.next_header);
8571 }
8572
8573 /**
8574  * Add TCP item to matcher and to the value.
8575  *
8576  * @param[in, out] matcher
8577  *   Flow matcher.
8578  * @param[in, out] key
8579  *   Flow matcher value.
8580  * @param[in] item
8581  *   Flow pattern to translate.
8582  * @param[in] inner
8583  *   Item is inner pattern.
8584  */
8585 static void
8586 flow_dv_translate_item_tcp(void *matcher, void *key,
8587                            const struct rte_flow_item *item,
8588                            int inner)
8589 {
8590         const struct rte_flow_item_tcp *tcp_m = item->mask;
8591         const struct rte_flow_item_tcp *tcp_v = item->spec;
8592         void *headers_m;
8593         void *headers_v;
8594
8595         if (inner) {
8596                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8597                                          inner_headers);
8598                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8599         } else {
8600                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8601                                          outer_headers);
8602                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8603         }
8604         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8605         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8606         if (!tcp_v)
8607                 return;
8608         if (!tcp_m)
8609                 tcp_m = &rte_flow_item_tcp_mask;
8610         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8611                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8612         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8613                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8614         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8615                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8616         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8617                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8618         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8619                  tcp_m->hdr.tcp_flags);
8620         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8621                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8622 }
8623
8624 /**
8625  * Add UDP item to matcher and to the value.
8626  *
8627  * @param[in, out] matcher
8628  *   Flow matcher.
8629  * @param[in, out] key
8630  *   Flow matcher value.
8631  * @param[in] item
8632  *   Flow pattern to translate.
8633  * @param[in] inner
8634  *   Item is inner pattern.
8635  */
8636 static void
8637 flow_dv_translate_item_udp(void *matcher, void *key,
8638                            const struct rte_flow_item *item,
8639                            int inner)
8640 {
8641         const struct rte_flow_item_udp *udp_m = item->mask;
8642         const struct rte_flow_item_udp *udp_v = item->spec;
8643         void *headers_m;
8644         void *headers_v;
8645
8646         if (inner) {
8647                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8648                                          inner_headers);
8649                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8650         } else {
8651                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8652                                          outer_headers);
8653                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8654         }
8655         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8656         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8657         if (!udp_v)
8658                 return;
8659         if (!udp_m)
8660                 udp_m = &rte_flow_item_udp_mask;
8661         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8662                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8663         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8664                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8665         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8666                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8667         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8668                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8669 }
8670
8671 /**
8672  * Add GRE optional Key item to matcher and to the value.
8673  *
8674  * @param[in, out] matcher
8675  *   Flow matcher.
8676  * @param[in, out] key
8677  *   Flow matcher value.
8678  * @param[in] item
8679  *   Flow pattern to translate.
 */
8683 static void
8684 flow_dv_translate_item_gre_key(void *matcher, void *key,
                               const struct rte_flow_item *item)
8686 {
8687         const rte_be32_t *key_m = item->mask;
8688         const rte_be32_t *key_v = item->spec;
8689         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8690         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8691         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8692
8693         /* GRE K bit must be on and should already be validated */
8694         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8695         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8696         if (!key_v)
8697                 return;
8698         if (!key_m)
8699                 key_m = &gre_key_default_mask;
8700         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8701                  rte_be_to_cpu_32(*key_m) >> 8);
8702         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8703                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8704         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8705                  rte_be_to_cpu_32(*key_m) & 0xFF);
8706         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8707                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8708 }
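
/*
 * E.g. a 32-bit GRE key 0xaabbccdd is split into gre_key_h = 0xaabbcc
 * (upper 24 bits) and gre_key_l = 0xdd (lower 8 bits).
 */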
8709
8710 /**
8711  * Add GRE item to matcher and to the value.
8712  *
8713  * @param[in, out] matcher
8714  *   Flow matcher.
8715  * @param[in, out] key
8716  *   Flow matcher value.
8717  * @param[in] item
8718  *   Flow pattern to translate.
8719  * @param[in] inner
8720  *   Item is inner pattern.
8721  */
8722 static void
8723 flow_dv_translate_item_gre(void *matcher, void *key,
8724                            const struct rte_flow_item *item,
8725                            int inner)
8726 {
8727         const struct rte_flow_item_gre *gre_m = item->mask;
8728         const struct rte_flow_item_gre *gre_v = item->spec;
8729         void *headers_m;
8730         void *headers_v;
8731         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8732         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8733         struct {
8734                 union {
8735                         __extension__
8736                         struct {
8737                                 uint16_t version:3;
8738                                 uint16_t rsvd0:9;
8739                                 uint16_t s_present:1;
8740                                 uint16_t k_present:1;
8741                                 uint16_t rsvd_bit1:1;
8742                                 uint16_t c_present:1;
8743                         };
8744                         uint16_t value;
8745                 };
8746         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8747
8748         if (inner) {
8749                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8750                                          inner_headers);
8751                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8752         } else {
8753                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8754                                          outer_headers);
8755                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8756         }
8757         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8758         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8759         if (!gre_v)
8760                 return;
8761         if (!gre_m)
8762                 gre_m = &rte_flow_item_gre_mask;
8763         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
8764                  rte_be_to_cpu_16(gre_m->protocol));
8765         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8766                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
8767         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8768         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8769         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8770                  gre_crks_rsvd0_ver_m.c_present);
8771         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8772                  gre_crks_rsvd0_ver_v.c_present &
8773                  gre_crks_rsvd0_ver_m.c_present);
8774         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8775                  gre_crks_rsvd0_ver_m.k_present);
8776         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8777                  gre_crks_rsvd0_ver_v.k_present &
8778                  gre_crks_rsvd0_ver_m.k_present);
8779         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8780                  gre_crks_rsvd0_ver_m.s_present);
8781         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8782                  gre_crks_rsvd0_ver_v.s_present &
8783                  gre_crks_rsvd0_ver_m.s_present);
8784 }
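
/*
 * With the union above on a little-endian host, a c_rsvd0_ver of
 * 0x2000 (after rte_be_to_cpu_16()) sets only bit 13, i.e.
 * k_present = 1 with c_present and s_present clear.
 */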
8785
8786 /**
8787  * Add NVGRE item to matcher and to the value.
8788  *
8789  * @param[in, out] matcher
8790  *   Flow matcher.
8791  * @param[in, out] key
8792  *   Flow matcher value.
8793  * @param[in] item
8794  *   Flow pattern to translate.
8795  * @param[in] inner
8796  *   Item is inner pattern.
8797  */
8798 static void
8799 flow_dv_translate_item_nvgre(void *matcher, void *key,
8800                              const struct rte_flow_item *item,
8801                              int inner)
8802 {
8803         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8804         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8805         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8806         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8807         const char *tni_flow_id_m;
8808         const char *tni_flow_id_v;
8809         char *gre_key_m;
8810         char *gre_key_v;
8811         int size;
8812         int i;
8813
8814         /* For NVGRE, GRE header fields must be set with defined values. */
8815         const struct rte_flow_item_gre gre_spec = {
8816                 .c_rsvd0_ver = RTE_BE16(0x2000),
8817                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8818         };
8819         const struct rte_flow_item_gre gre_mask = {
8820                 .c_rsvd0_ver = RTE_BE16(0xB000),
8821                 .protocol = RTE_BE16(UINT16_MAX),
8822         };
8823         const struct rte_flow_item gre_item = {
8824                 .spec = &gre_spec,
8825                 .mask = &gre_mask,
8826                 .last = NULL,
8827         };
8828         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8829         if (!nvgre_v)
8830                 return;
8831         if (!nvgre_m)
8832                 nvgre_m = &rte_flow_item_nvgre_mask;
8833         tni_flow_id_m = (const char *)nvgre_m->tni;
8834         tni_flow_id_v = (const char *)nvgre_v->tni;
8835         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8836         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8837         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8838         memcpy(gre_key_m, tni_flow_id_m, size);
8839         for (i = 0; i < size; ++i)
8840                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8841 }
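
/*
 * NVGRE is thus matched as GRE with C = 0, K = 1, S = 0 (spec 0x2000
 * under mask 0xb000) and protocol TEB; the 24-bit TNI plus the 8-bit
 * flow_id are copied over the 32-bit GRE key fields.
 */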
8842
8843 /**
8844  * Add VXLAN item to matcher and to the value.
8845  *
8846  * @param[in] dev
8847  *   Pointer to the Ethernet device structure.
8848  * @param[in] attr
8849  *   Flow rule attributes.
8850  * @param[in, out] matcher
8851  *   Flow matcher.
8852  * @param[in, out] key
8853  *   Flow matcher value.
8854  * @param[in] item
8855  *   Flow pattern to translate.
8856  * @param[in] inner
8857  *   Item is inner pattern.
8858  */
8859 static void
8860 flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
8861                              const struct rte_flow_attr *attr,
8862                              void *matcher, void *key,
8863                              const struct rte_flow_item *item,
8864                              int inner)
8865 {
8866         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8867         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8868         void *headers_m;
8869         void *headers_v;
8870         void *misc5_m;
8871         void *misc5_v;
8872         uint32_t *tunnel_header_v;
8873         uint32_t *tunnel_header_m;
8874         uint16_t dport;
8875         struct mlx5_priv *priv = dev->data->dev_private;
8876         const struct rte_flow_item_vxlan nic_mask = {
8877                 .vni = "\xff\xff\xff",
8878                 .rsvd1 = 0xff,
8879         };
8880
8881         if (inner) {
8882                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8883                                          inner_headers);
8884                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8885         } else {
8886                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8887                                          outer_headers);
8888                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8889         }
8890         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8891                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8892         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8893                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8894                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8895         }
8896         dport = MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport);
8897         if (!vxlan_v)
8898                 return;
8899         if (!vxlan_m) {
8900                 if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
8901                     (attr->group && !priv->sh->misc5_cap))
8902                         vxlan_m = &rte_flow_item_vxlan_mask;
8903                 else
8904                         vxlan_m = &nic_mask;
8905         }
8906         if ((priv->sh->steering_format_version ==
8907             MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
8908             dport != MLX5_UDP_PORT_VXLAN) ||
8909             (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
8910             ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
8911                 void *misc_m;
8912                 void *misc_v;
8913                 char *vni_m;
8914                 char *vni_v;
8915                 int size;
8916                 int i;
8917                 misc_m = MLX5_ADDR_OF(fte_match_param,
8918                                       matcher, misc_parameters);
8919                 misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8920                 size = sizeof(vxlan_m->vni);
8921                 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8922                 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8923                 memcpy(vni_m, vxlan_m->vni, size);
8924                 for (i = 0; i < size; ++i)
8925                         vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8926                 return;
8927         }
8928         misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
8929         misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
8930         tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8931                                                    misc5_v,
8932                                                    tunnel_header_1);
8933         tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8934                                                    misc5_m,
8935                                                    tunnel_header_1);
8936         *tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
8937                            (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
8938                            (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
8939         if (*tunnel_header_v)
8940                 *tunnel_header_m = vxlan_m->vni[0] |
8941                         vxlan_m->vni[1] << 8 |
8942                         vxlan_m->vni[2] << 16;
8943         else
8944                 *tunnel_header_m = 0x0;
8945         *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
8946         if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
8947                 *tunnel_header_m |= vxlan_m->rsvd1 << 24;
8948 }
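
/*
 * tunnel_header_1 packing example: vni = {0x12, 0x34, 0x56} with a full
 * mask gives *tunnel_header_v = 0x12 | 0x34 << 8 | 0x56 << 16 =
 * 0x00563412, with rsvd1 (when masked) occupying bits 24-31.
 */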
8949
8950 /**
8951  * Add VXLAN-GPE item to matcher and to the value.
8952  *
8953  * @param[in, out] matcher
8954  *   Flow matcher.
8955  * @param[in, out] key
8956  *   Flow matcher value.
8957  * @param[in] item
8958  *   Flow pattern to translate.
8959  * @param[in] inner
8960  *   Item is inner pattern.
8961  */
8963 static void
8964 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8965                                  const struct rte_flow_item *item, int inner)
8966 {
8967         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8968         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8969         void *headers_m;
8970         void *headers_v;
8971         void *misc_m =
8972                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8973         void *misc_v =
8974                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8975         char *vni_m;
8976         char *vni_v;
8977         uint16_t dport;
8978         int size;
8979         int i;
8980         uint8_t flags_m = 0xff;
8981         uint8_t flags_v = 0xc;
8982
8983         if (inner) {
8984                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8985                                          inner_headers);
8986                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8987         } else {
8988                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8989                                          outer_headers);
8990                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8991         }
8992         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8993                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8994         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8995                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8996                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8997         }
8998         if (!vxlan_v)
8999                 return;
9000         if (!vxlan_m)
9001                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
9002         size = sizeof(vxlan_m->vni);
9003         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
9004         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
9005         memcpy(vni_m, vxlan_m->vni, size);
9006         for (i = 0; i < size; ++i)
9007                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
9008         if (vxlan_m->flags) {
9009                 flags_m = vxlan_m->flags;
9010                 flags_v = vxlan_v->flags;
9011         }
9012         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
9013         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
9014         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
9015                  vxlan_m->protocol);
9016         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
9017                  vxlan_v->protocol);
9018 }
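
/*
 * Note: when the item carries no flags mask, the matcher defaults to
 * flags_m = 0xff / flags_v = 0x0c, i.e. the I (VNI valid) and P (next
 * protocol) bits per the VXLAN-GPE draft header layout.
 */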
9019
9020 /**
9021  * Add Geneve item to matcher and to the value.
9022  *
9023  * @param[in, out] matcher
9024  *   Flow matcher.
9025  * @param[in, out] key
9026  *   Flow matcher value.
9027  * @param[in] item
9028  *   Flow pattern to translate.
9029  * @param[in] inner
9030  *   Item is inner pattern.
9031  */
9033 static void
9034 flow_dv_translate_item_geneve(void *matcher, void *key,
9035                               const struct rte_flow_item *item, int inner)
9036 {
9037         const struct rte_flow_item_geneve *geneve_m = item->mask;
9038         const struct rte_flow_item_geneve *geneve_v = item->spec;
9039         void *headers_m;
9040         void *headers_v;
9041         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9042         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9043         uint16_t dport;
9044         uint16_t gbhdr_m;
9045         uint16_t gbhdr_v;
9046         char *vni_m;
9047         char *vni_v;
9048         size_t size, i;
9049
9050         if (inner) {
9051                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9052                                          inner_headers);
9053                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9054         } else {
9055                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9056                                          outer_headers);
9057                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9058         }
9059         dport = MLX5_UDP_PORT_GENEVE;
9060         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9061                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9062                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9063         }
9064         if (!geneve_v)
9065                 return;
9066         if (!geneve_m)
9067                 geneve_m = &rte_flow_item_geneve_mask;
9068         size = sizeof(geneve_m->vni);
9069         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
9070         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
9071         memcpy(vni_m, geneve_m->vni, size);
9072         for (i = 0; i < size; ++i)
9073                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
9074         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
9075                  rte_be_to_cpu_16(geneve_m->protocol));
9076         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
9077                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
9078         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
9079         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
9080         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
9081                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9082         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
9083                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9084         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9085                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9086         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9087                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
9088                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9089 }
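
/*
 * The ver_opt_len_o_c_rsvd0 word bundles the Geneve version, option
 * length (in 4-byte words), OAM and critical bits;
 * MLX5_GENEVE_OAMF_VAL() and MLX5_GENEVE_OPTLEN_VAL() extract the OAM
 * flag and option length for the matcher fields above.
 */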
9090
9091 /**
9092  * Create Geneve TLV option resource.
9093  *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] item
 *   Pointer to the GENEVE TLV option flow item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
9105  */
9107 int
9108 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
9109                                              const struct rte_flow_item *item,
9110                                              struct rte_flow_error *error)
9111 {
9112         struct mlx5_priv *priv = dev->data->dev_private;
9113         struct mlx5_dev_ctx_shared *sh = priv->sh;
9114         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
9115                         sh->geneve_tlv_option_resource;
9116         struct mlx5_devx_obj *obj;
9117         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9118         int ret = 0;
9119
9120         if (!geneve_opt_v)
9121                 return -1;
9122         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
9123         if (geneve_opt_resource != NULL) {
9124                 if (geneve_opt_resource->option_class ==
9125                         geneve_opt_v->option_class &&
9126                         geneve_opt_resource->option_type ==
9127                         geneve_opt_v->option_type &&
9128                         geneve_opt_resource->length ==
9129                         geneve_opt_v->option_len) {
                        /* We already have GENEVE TLV option obj allocated. */
9131                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
9132                                            __ATOMIC_RELAXED);
9133                 } else {
9134                         ret = rte_flow_error_set(error, ENOMEM,
9135                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9136                                 "Only one GENEVE TLV option supported");
9137                         goto exit;
9138                 }
9139         } else {
9140                 /* Create a GENEVE TLV object and resource. */
9141                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->cdev->ctx,
9142                                 geneve_opt_v->option_class,
9143                                 geneve_opt_v->option_type,
9144                                 geneve_opt_v->option_len);
9145                 if (!obj) {
9146                         ret = rte_flow_error_set(error, ENODATA,
9147                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9148                                 "Failed to create GENEVE TLV Devx object");
9149                         goto exit;
9150                 }
9151                 sh->geneve_tlv_option_resource =
9152                                 mlx5_malloc(MLX5_MEM_ZERO,
9153                                                 sizeof(*geneve_opt_resource),
9154                                                 0, SOCKET_ID_ANY);
9155                 if (!sh->geneve_tlv_option_resource) {
9156                         claim_zero(mlx5_devx_cmd_destroy(obj));
9157                         ret = rte_flow_error_set(error, ENOMEM,
9158                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9159                                 "GENEVE TLV object memory allocation failed");
9160                         goto exit;
9161                 }
9162                 geneve_opt_resource = sh->geneve_tlv_option_resource;
9163                 geneve_opt_resource->obj = obj;
9164                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
9165                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
9166                 geneve_opt_resource->length = geneve_opt_v->option_len;
9167                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
9168                                 __ATOMIC_RELAXED);
9169         }
9170 exit:
9171         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
9172         return ret;
9173 }
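
/*
 * Only a single Geneve TLV option object exists per shared device
 * context; subsequent flows reuse it through the reference counter as
 * long as option class, type and length all match.
 */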
9174
9175 /**
9176  * Add Geneve TLV option item to matcher.
9177  *
9178  * @param[in, out] dev
9179  *   Pointer to rte_eth_dev structure.
9180  * @param[in, out] matcher
9181  *   Flow matcher.
9182  * @param[in, out] key
9183  *   Flow matcher value.
9184  * @param[in] item
9185  *   Flow pattern to translate.
9186  * @param[out] error
9187  *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
9189 static int
9190 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9191                                   void *key, const struct rte_flow_item *item,
9192                                   struct rte_flow_error *error)
9193 {
9194         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9195         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9196         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9197         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9198         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9199                         misc_parameters_3);
9200         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9201         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9202         int ret = 0;
9203
9204         if (!geneve_opt_v)
9205                 return -1;
9206         if (!geneve_opt_m)
9207                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9208         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9209                                                            error);
9210         if (ret) {
9211                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9212                 return ret;
9213         }
9214         /*
9215          * Set the option length in GENEVE header if not requested.
9216          * The GENEVE TLV option length is expressed by the option length field
9217          * in the GENEVE header.
9218          * If the option length was not requested but the GENEVE TLV option item
 * is present, we set the option length field implicitly.
9220          */
9221         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9222                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9223                          MLX5_GENEVE_OPTLEN_MASK);
9224                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9225                          geneve_opt_v->option_len + 1);
9226         }
9227         MLX5_SET(fte_match_set_misc, misc_m, geneve_tlv_option_0_exist, 1);
9228         MLX5_SET(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist, 1);
9229         /* Set the data. */
9230         if (geneve_opt_v->data) {
9231                 memcpy(&opt_data_key, geneve_opt_v->data,
9232                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9233                                 sizeof(opt_data_key)));
9234                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9235                                 sizeof(opt_data_key));
9236                 memcpy(&opt_data_mask, geneve_opt_m->data,
9237                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9238                                 sizeof(opt_data_mask)));
9239                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9240                                 sizeof(opt_data_mask));
9241                 MLX5_SET(fte_match_set_misc3, misc3_m,
9242                                 geneve_tlv_option_0_data,
9243                                 rte_be_to_cpu_32(opt_data_mask));
9244                 MLX5_SET(fte_match_set_misc3, misc3_v,
9245                                 geneve_tlv_option_0_data,
9246                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9247         }
9248         return ret;
9249 }
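
/*
 * Note: option_len counts 4-byte data words excluding the 4-byte option
 * header, while the Geneve header opt_len field covers the whole
 * option, hence the implicit option_len + 1 above (assuming the single
 * supported TLV option).
 */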
9250
9251 /**
9252  * Add MPLS item to matcher and to the value.
9253  *
9254  * @param[in, out] matcher
9255  *   Flow matcher.
9256  * @param[in, out] key
9257  *   Flow matcher value.
9258  * @param[in] item
9259  *   Flow pattern to translate.
9260  * @param[in] prev_layer
9261  *   The protocol layer indicated in previous item.
9262  * @param[in] inner
9263  *   Item is inner pattern.
9264  */
9265 static void
9266 flow_dv_translate_item_mpls(void *matcher, void *key,
9267                             const struct rte_flow_item *item,
9268                             uint64_t prev_layer,
9269                             int inner)
9270 {
9271         const uint32_t *in_mpls_m = item->mask;
9272         const uint32_t *in_mpls_v = item->spec;
        uint32_t *out_mpls_m = NULL;
        uint32_t *out_mpls_v = NULL;
9275         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9276         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9277         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9278                                      misc_parameters_2);
9279         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9280         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9281         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9282
9283         switch (prev_layer) {
9284         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9285                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
9286                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9287                          MLX5_UDP_PORT_MPLS);
9288                 break;
9289         case MLX5_FLOW_LAYER_GRE:
9290                 /* Fall-through. */
9291         case MLX5_FLOW_LAYER_GRE_KEY:
9292                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
9293                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9294                          RTE_ETHER_TYPE_MPLS);
9295                 break;
9296         default:
9297                 break;
9298         }
9299         if (!in_mpls_v)
9300                 return;
9301         if (!in_mpls_m)
9302                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9303         switch (prev_layer) {
9304         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9305                 out_mpls_m =
9306                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9307                                                  outer_first_mpls_over_udp);
9308                 out_mpls_v =
9309                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9310                                                  outer_first_mpls_over_udp);
9311                 break;
9312         case MLX5_FLOW_LAYER_GRE:
9313                 out_mpls_m =
9314                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9315                                                  outer_first_mpls_over_gre);
9316                 out_mpls_v =
9317                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9318                                                  outer_first_mpls_over_gre);
9319                 break;
9320         default:
9321                 /* Inner MPLS not over GRE is not supported. */
9322                 if (!inner) {
9323                         out_mpls_m =
9324                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9325                                                          misc2_m,
9326                                                          outer_first_mpls);
9327                         out_mpls_v =
9328                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9329                                                          misc2_v,
9330                                                          outer_first_mpls);
9331                 }
9332                 break;
9333         }
9334         if (out_mpls_m && out_mpls_v) {
9335                 *out_mpls_m = *in_mpls_m;
9336                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9337         }
9338 }
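
/*
 * Usage sketch (values assumed, not part of the driver): an application
 * rule matching MPLS label 1000 over UDP port 6635 takes the
 * outer_first_mpls_over_udp path above. The 20-bit label occupies the
 * most significant bits of the label stack entry.
 */
static const struct rte_flow_item_mpls mpls_spec_example __rte_unused = {
	.label_tc_s = { 0x00, 0x3e, 0x81 }, /* Label 1000, TC 0, S bit set. */
};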
9339
9340 /**
9341  * Add metadata register item to matcher
9342  *
9343  * @param[in, out] matcher
9344  *   Flow matcher.
9345  * @param[in, out] key
9346  *   Flow matcher value.
9347  * @param[in] reg_type
9348  *   Type of device metadata register.
9349  * @param[in] data
9350  *   Register data to match.
9351  * @param[in] mask
9352  *   Register mask.
9353  */
9354 static void
9355 flow_dv_match_meta_reg(void *matcher, void *key,
9356                        enum modify_reg reg_type,
9357                        uint32_t data, uint32_t mask)
9358 {
9359         void *misc2_m =
9360                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9361         void *misc2_v =
9362                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9363         uint32_t temp;
9364
9365         data &= mask;
9366         switch (reg_type) {
9367         case REG_A:
9368                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9369                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9370                 break;
9371         case REG_B:
9372                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9373                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9374                 break;
9375         case REG_C_0:
9376                 /*
9377                  * The metadata register C0 field might be divided into
9378                  * source vport index and META item value, we should set
9379                  * this field according to specified mask, not as whole one.
9380                  */
9381                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9382                 temp |= mask;
9383                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9384                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9385                 temp &= ~mask;
9386                 temp |= data;
9387                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9388                 break;
9389         case REG_C_1:
9390                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9391                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9392                 break;
9393         case REG_C_2:
9394                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9395                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9396                 break;
9397         case REG_C_3:
9398                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9399                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9400                 break;
9401         case REG_C_4:
9402                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9403                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9404                 break;
9405         case REG_C_5:
9406                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9407                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9408                 break;
9409         case REG_C_6:
9410                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9411                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9412                 break;
9413         case REG_C_7:
9414                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9415                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9416                 break;
9417         default:
9418                 MLX5_ASSERT(false);
9419                 break;
9420         }
9421 }
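
/*
 * Illustrative sketch (not called by the driver): the REG_C_0 branch
 * above merges a new sub-field into whatever was already programmed,
 * because C0 may carry both the source vport tag and the META value.
 * The helper models that read-modify-write on plain integers; the name
 * is ours, not an mlx5 API.
 */
static __rte_unused void
flow_dv_regc0_merge_example(uint32_t *reg_m, uint32_t *reg_v,
			    uint32_t data, uint32_t mask)
{
	/* The matcher mask accumulates every sub-field mask. */
	*reg_m |= mask;
	/* The matcher value keeps other sub-fields and replaces only ours. */
	*reg_v = (*reg_v & ~mask) | (data & mask);
	/*
	 * E.g. with the vport tag already using the upper half
	 * (*reg_m == 0xffff0000), merging META with mask 0x0000ffff
	 * yields *reg_m == 0xffffffff and both values coexist.
	 */
}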
9422
9423 /**
9424  * Add MARK item to matcher
9425  *
9426  * @param[in] dev
9427  *   The device to configure through.
9428  * @param[in, out] matcher
9429  *   Flow matcher.
9430  * @param[in, out] key
9431  *   Flow matcher value.
9432  * @param[in] item
9433  *   Flow pattern to translate.
9434  */
9435 static void
9436 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9437                             void *matcher, void *key,
9438                             const struct rte_flow_item *item)
9439 {
9440         struct mlx5_priv *priv = dev->data->dev_private;
9441         const struct rte_flow_item_mark *mark;
9442         uint32_t value;
9443         uint32_t mask;
9444
9445         mark = item->mask ? (const void *)item->mask :
9446                             &rte_flow_item_mark_mask;
9447         mask = mark->id & priv->sh->dv_mark_mask;
9448         mark = (const void *)item->spec;
9449         MLX5_ASSERT(mark);
9450         value = mark->id & priv->sh->dv_mark_mask & mask;
9451         if (mask) {
9452                 enum modify_reg reg;
9453
9454                 /* Get the metadata register index for the mark. */
9455                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9456                 MLX5_ASSERT(reg > 0);
9457                 if (reg == REG_C_0) {
9458                         struct mlx5_priv *priv = dev->data->dev_private;
9459                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9460                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9461
9462                         mask &= msk_c0;
9463                         mask <<= shl_c0;
9464                         value <<= shl_c0;
9465                 }
9466                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9467         }
9468 }
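
/*
 * Worked example (a sketch, values assumed): if the PMD owns the upper
 * half of REG_C_0, priv->sh->dv_regc0_mask is 0xffff0000 and rte_bsf32()
 * returns 16, so a MARK id of 0xab is programmed as 0x00ab0000.
 */
static __rte_unused uint32_t
flow_dv_mark_to_regc0_example(uint32_t mark_id, uint32_t msk_c0)
{
	uint32_t shl_c0 = rte_bsf32(msk_c0); /* Lowest usable C0 bit. */

	return (mark_id << shl_c0) & msk_c0;
}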
9469
9470 /**
9471  * Add META item to matcher
9472  *
9473  * @param[in] dev
9474  *   The device to configure through.
9475  * @param[in, out] matcher
9476  *   Flow matcher.
9477  * @param[in, out] key
9478  *   Flow matcher value.
9479  * @param[in] attr
9480  *   Attributes of flow that includes this item.
9481  * @param[in] item
9482  *   Flow pattern to translate.
9483  */
9484 static void
9485 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9486                             void *matcher, void *key,
9487                             const struct rte_flow_attr *attr,
9488                             const struct rte_flow_item *item)
9489 {
9490         const struct rte_flow_item_meta *meta_m;
9491         const struct rte_flow_item_meta *meta_v;
9492
9493         meta_m = (const void *)item->mask;
9494         if (!meta_m)
9495                 meta_m = &rte_flow_item_meta_mask;
9496         meta_v = (const void *)item->spec;
9497         if (meta_v) {
9498                 int reg;
9499                 uint32_t value = meta_v->data;
9500                 uint32_t mask = meta_m->data;
9501
9502                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9503                 if (reg < 0)
9504                         return;
9505                 MLX5_ASSERT(reg != REG_NON);
9506                 if (reg == REG_C_0) {
9507                         struct mlx5_priv *priv = dev->data->dev_private;
9508                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9509                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9510
9511                         mask &= msk_c0;
9512                         mask <<= shl_c0;
9513                         value <<= shl_c0;
9514                 }
9515                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9516         }
9517 }
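
/*
 * Usage sketch from the application side (values assumed): a pattern
 * that reaches the translation above through the public rte_flow API,
 * matching the lower 16 bits of metadata previously written by
 * RTE_FLOW_ACTION_TYPE_SET_META.
 */
static const struct rte_flow_item_meta meta_spec_example = {
	.data = 0x1234,
};
static const struct rte_flow_item_meta meta_mask_example = {
	.data = 0xffff,
};
static const struct rte_flow_item meta_pattern_example[] __rte_unused = {
	{
		.type = RTE_FLOW_ITEM_TYPE_META,
		.spec = &meta_spec_example,
		.mask = &meta_mask_example,
	},
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};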
9518
9519 /**
9520  * Add vport metadata Reg C0 item to matcher
9521  *
9522  * @param[in, out] matcher
9523  *   Flow matcher.
9524  * @param[in, out] key
9525  *   Flow matcher value.
 * @param[in] value
 *   Register C0 value to match.
 * @param[in] mask
 *   Register C0 mask.
9528  */
9529 static void
9530 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9531                                   uint32_t value, uint32_t mask)
9532 {
9533         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9534 }
9535
9536 /**
9537  * Add tag item to matcher
9538  *
9539  * @param[in] dev
9540  *   The device to configure through.
9541  * @param[in, out] matcher
9542  *   Flow matcher.
9543  * @param[in, out] key
9544  *   Flow matcher value.
9545  * @param[in] item
9546  *   Flow pattern to translate.
9547  */
9548 static void
9549 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9550                                 void *matcher, void *key,
9551                                 const struct rte_flow_item *item)
9552 {
9553         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9554         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9555         uint32_t mask, value;
9556
9557         MLX5_ASSERT(tag_v);
9558         value = tag_v->data;
9559         mask = tag_m ? tag_m->data : UINT32_MAX;
9560         if (tag_v->id == REG_C_0) {
9561                 struct mlx5_priv *priv = dev->data->dev_private;
9562                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9563                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9564
9565                 mask &= msk_c0;
9566                 mask <<= shl_c0;
9567                 value <<= shl_c0;
9568         }
9569         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9570 }
9571
9572 /**
9573  * Add TAG item to matcher
9574  *
9575  * @param[in] dev
9576  *   The device to configure through.
9577  * @param[in, out] matcher
9578  *   Flow matcher.
9579  * @param[in, out] key
9580  *   Flow matcher value.
9581  * @param[in] item
9582  *   Flow pattern to translate.
9583  */
9584 static void
9585 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9586                            void *matcher, void *key,
9587                            const struct rte_flow_item *item)
9588 {
9589         const struct rte_flow_item_tag *tag_v = item->spec;
9590         const struct rte_flow_item_tag *tag_m = item->mask;
9591         enum modify_reg reg;
9592
9593         MLX5_ASSERT(tag_v);
9594         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9595         /* Get the metadata register index for the tag. */
9596         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9597         MLX5_ASSERT(reg > 0);
9598         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9599 }
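
/*
 * Usage sketch (index and value assumed): matching application TAG 0
 * written earlier by RTE_FLOW_ACTION_TYPE_SET_TAG; the index is remapped
 * to a free REG_C_x by mlx5_flow_get_reg_id() above.
 */
static const struct rte_flow_item_tag tag_spec_example __rte_unused = {
	.data = 0xbeef,
	.index = 0,
};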
9600
9601 /**
9602  * Add source vport match to the specified matcher.
9603  *
9604  * @param[in, out] matcher
9605  *   Flow matcher.
9606  * @param[in, out] key
9607  *   Flow matcher value.
9608  * @param[in] port
9609  *   Source vport value to match.
9610  * @param[in] mask
9611  *   Source vport mask.
9612  */
9613 static void
9614 flow_dv_translate_item_source_vport(void *matcher, void *key,
9615                                     int16_t port, uint16_t mask)
9616 {
9617         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9618         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9619
9620         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9621         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9622 }
9623
9624 /**
9625  * Translate port-id item to eswitch match on port-id.
9626  *
9627  * @param[in] dev
9628  *   The device to configure through.
9629  * @param[in, out] matcher
9630  *   Flow matcher.
9631  * @param[in, out] key
9632  *   Flow matcher value.
9633  * @param[in] item
9634  *   Flow pattern to translate.
9635  * @param[in] attr
9636  *   Flow attributes.
9637  *
9638  * @return
9639  *   0 on success, a negative errno value otherwise.
9640  */
9641 static int
9642 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9643                                void *key, const struct rte_flow_item *item,
9644                                const struct rte_flow_attr *attr)
9645 {
9646         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9647         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9648         struct mlx5_priv *priv;
9649         uint16_t mask, id;
9650
9651         if (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) {
9652                 flow_dv_translate_item_source_vport(matcher, key,
9653                         flow_dv_get_esw_manager_vport_id(dev), 0xffff);
9654                 return 0;
9655         }
9656         mask = pid_m ? pid_m->id : 0xffff;
9657         id = pid_v ? pid_v->id : dev->data->port_id;
9658         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9659         if (!priv)
9660                 return -rte_errno;
9661         /*
9662          * Translate to vport field or to metadata, depending on mode.
9663          * Kernel can use either misc.source_port or half of C0 metadata
9664          * register.
9665          */
9666         if (priv->vport_meta_mask) {
9667                 /*
9668                  * Provide the hint for SW steering library
9669                  * to insert the flow into ingress domain and
9670                  * save the extra vport match.
9671                  */
9672                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9673                     priv->pf_bond < 0 && attr->transfer)
9674                         flow_dv_translate_item_source_vport
9675                                 (matcher, key, priv->vport_id, mask);
9676                 /*
9677                  * We should always set the vport metadata register,
9678                  * otherwise the SW steering library can drop
9679                  * the rule if the wire vport metadata value is not zero;
9680                  * this depends on kernel configuration.
9681                  */
9682                 flow_dv_translate_item_meta_vport(matcher, key,
9683                                                   priv->vport_meta_tag,
9684                                                   priv->vport_meta_mask);
9685         } else {
9686                 flow_dv_translate_item_source_vport(matcher, key,
9687                                                     priv->vport_id, mask);
9688         }
9689         return 0;
9690 }
9691
9692 /**
9693  * Add ICMP6 item to matcher and to the value.
9694  *
9695  * @param[in, out] matcher
9696  *   Flow matcher.
9697  * @param[in, out] key
9698  *   Flow matcher value.
9699  * @param[in] item
9700  *   Flow pattern to translate.
9701  * @param[in] inner
9702  *   Item is inner pattern.
9703  */
9704 static void
9705 flow_dv_translate_item_icmp6(void *matcher, void *key,
9706                               const struct rte_flow_item *item,
9707                               int inner)
9708 {
9709         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9710         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9711         void *headers_m;
9712         void *headers_v;
9713         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9714                                      misc_parameters_3);
9715         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9716         if (inner) {
9717                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9718                                          inner_headers);
9719                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9720         } else {
9721                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9722                                          outer_headers);
9723                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9724         }
9725         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9726         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9727         if (!icmp6_v)
9728                 return;
9729         if (!icmp6_m)
9730                 icmp6_m = &rte_flow_item_icmp6_mask;
9731         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9732         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9733                  icmp6_v->type & icmp6_m->type);
9734         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9735         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9736                  icmp6_v->code & icmp6_m->code);
9737 }
9738
9739 /**
9740  * Add ICMP item to matcher and to the value.
9741  *
9742  * @param[in, out] matcher
9743  *   Flow matcher.
9744  * @param[in, out] key
9745  *   Flow matcher value.
9746  * @param[in] item
9747  *   Flow pattern to translate.
9748  * @param[in] inner
9749  *   Item is inner pattern.
9750  */
9751 static void
9752 flow_dv_translate_item_icmp(void *matcher, void *key,
9753                             const struct rte_flow_item *item,
9754                             int inner)
9755 {
9756         const struct rte_flow_item_icmp *icmp_m = item->mask;
9757         const struct rte_flow_item_icmp *icmp_v = item->spec;
9758         uint32_t icmp_header_data_m = 0;
9759         uint32_t icmp_header_data_v = 0;
9760         void *headers_m;
9761         void *headers_v;
9762         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9763                                      misc_parameters_3);
9764         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9765         if (inner) {
9766                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9767                                          inner_headers);
9768                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9769         } else {
9770                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9771                                          outer_headers);
9772                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9773         }
9774         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9775         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9776         if (!icmp_v)
9777                 return;
9778         if (!icmp_m)
9779                 icmp_m = &rte_flow_item_icmp_mask;
9780         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9781                  icmp_m->hdr.icmp_type);
9782         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9783                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9784         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9785                  icmp_m->hdr.icmp_code);
9786         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9787                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9788         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9789         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9790         if (icmp_header_data_m) {
9791                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9792                 icmp_header_data_v |=
9793                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9794                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9795                          icmp_header_data_m);
9796                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9797                          icmp_header_data_v & icmp_header_data_m);
9798         }
9799 }
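
/*
 * A sketch of the icmp_header_data packing above (values assumed): the
 * identifier lands in the high 16 bits and the sequence number in the
 * low 16 bits, both converted from network order first.
 */
static __rte_unused uint32_t
flow_dv_icmp_header_data_example(rte_be16_t ident, rte_be16_t seq)
{
	/* E.g. identifier 0x42 and sequence 0x1 pack to 0x00420001. */
	return rte_be_to_cpu_16(seq) |
	       ((uint32_t)rte_be_to_cpu_16(ident) << 16);
}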
9800
9801 /**
9802  * Add GTP item to matcher and to the value.
9803  *
9804  * @param[in, out] matcher
9805  *   Flow matcher.
9806  * @param[in, out] key
9807  *   Flow matcher value.
9808  * @param[in] item
9809  *   Flow pattern to translate.
9810  * @param[in] inner
9811  *   Item is inner pattern.
9812  */
9813 static void
9814 flow_dv_translate_item_gtp(void *matcher, void *key,
9815                            const struct rte_flow_item *item, int inner)
9816 {
9817         const struct rte_flow_item_gtp *gtp_m = item->mask;
9818         const struct rte_flow_item_gtp *gtp_v = item->spec;
9819         void *headers_m;
9820         void *headers_v;
9821         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9822                                      misc_parameters_3);
9823         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9824         uint16_t dport = RTE_GTPU_UDP_PORT;
9825
9826         if (inner) {
9827                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9828                                          inner_headers);
9829                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9830         } else {
9831                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9832                                          outer_headers);
9833                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9834         }
9835         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9836                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9837                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9838         }
9839         if (!gtp_v)
9840                 return;
9841         if (!gtp_m)
9842                 gtp_m = &rte_flow_item_gtp_mask;
9843         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9844                  gtp_m->v_pt_rsv_flags);
9845         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9846                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9847         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9848         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9849                  gtp_v->msg_type & gtp_m->msg_type);
9850         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9851                  rte_be_to_cpu_32(gtp_m->teid));
9852         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9853                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9854 }
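
/*
 * Usage sketch (TEID assumed): matching GTP-U tunnel endpoint id
 * 0x11223344 exactly; the UDP destination port 2152 match is added
 * implicitly by the translation above when not already present.
 */
static const struct rte_flow_item_gtp gtp_spec_example __rte_unused = {
	.teid = RTE_BE32(0x11223344),
};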
9855
9856 /**
9857  * Add GTP PSC item to matcher.
9858  *
9859  * @param[in, out] matcher
9860  *   Flow matcher.
9861  * @param[in, out] key
9862  *   Flow matcher value.
9863  * @param[in] item
9864  *   Flow pattern to translate.
9865  */
9866 static int
9867 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9868                                const struct rte_flow_item *item)
9869 {
9870         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9871         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9872         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9873                         misc_parameters_3);
9874         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9875         union {
9876                 uint32_t w32;
9877                 struct {
9878                         uint16_t seq_num;
9879                         uint8_t npdu_num;
9880                         uint8_t next_ext_header_type;
9881                 };
9882         } dw_2;
9883         uint8_t gtp_flags;
9884
9885         /* Always set the E-flag match to one, regardless of GTP item settings. */
9886         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9887         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9888         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9889         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9890         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9891         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9892         /* Set next extension header type. */
9893         dw_2.seq_num = 0;
9894         dw_2.npdu_num = 0;
9895         dw_2.next_ext_header_type = 0xff;
9896         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9897                  rte_cpu_to_be_32(dw_2.w32));
9898         dw_2.seq_num = 0;
9899         dw_2.npdu_num = 0;
9900         dw_2.next_ext_header_type = 0x85;
9901         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9902                  rte_cpu_to_be_32(dw_2.w32));
9903         if (gtp_psc_v) {
9904                 union {
9905                         uint32_t w32;
9906                         struct {
9907                                 uint8_t len;
9908                                 uint8_t type_flags;
9909                                 uint8_t qfi;
9910                                 uint8_t reserved;
9911                         };
9912                 } dw_0;
9913
9914                 /* Set extension header PDU type and QoS. */
9915                 if (!gtp_psc_m)
9916                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9917                 dw_0.w32 = 0;
9918                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->hdr.type);
9919                 dw_0.qfi = gtp_psc_m->hdr.qfi;
9920                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9921                          rte_cpu_to_be_32(dw_0.w32));
9922                 dw_0.w32 = 0;
9923                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->hdr.type &
9924                                                         gtp_psc_m->hdr.type);
9925                 dw_0.qfi = gtp_psc_v->hdr.qfi & gtp_psc_m->hdr.qfi;
9926                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9927                          rte_cpu_to_be_32(dw_0.w32));
9928         }
9929         return 0;
9930 }
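
/*
 * Note on the constants above: 0x85 is the GTP-U "PDU Session Container"
 * next-extension-header type (3GPP TS 29.281), so the rule only hits
 * packets carrying that container. A usage sketch (QFI value assumed):
 */
static const struct rte_flow_item_gtp_psc gtp_psc_spec_example __rte_unused = {
	.hdr.qfi = 9, /* QoS Flow Identifier to match. */
};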
9931
9932 /**
9933  * Add eCPRI item to matcher and to the value.
9934  *
9935  * @param[in] dev
9936  *   The device to configure through.
9937  * @param[in, out] matcher
9938  *   Flow matcher.
9939  * @param[in, out] key
9940  *   Flow matcher value.
9941  * @param[in] item
9942  *   Flow pattern to translate.
9943  * @param[in] last_item
9944  *   Last item flags.
9945  */
9946 static void
9947 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9948                              void *key, const struct rte_flow_item *item,
9949                              uint64_t last_item)
9950 {
9951         struct mlx5_priv *priv = dev->data->dev_private;
9952         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
9953         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
9954         struct rte_ecpri_common_hdr common;
9955         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
9956                                      misc_parameters_4);
9957         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
9958         uint32_t *samples;
9959         void *dw_m;
9960         void *dw_v;
9961
9962         /*
9963          * In case of eCPRI over Ethernet, if EtherType is not specified,
9964          * match on eCPRI EtherType implicitly.
9965          */
9966         if (last_item & MLX5_FLOW_LAYER_OUTER_L2) {
9967                 void *hdrs_m, *hdrs_v, *l2m, *l2v;
9968
9969                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9970                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9971                 l2m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, ethertype);
9972                 l2v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
9973                 if (*(uint16_t *)l2m == 0 && *(uint16_t *)l2v == 0) {
9974                         *(uint16_t *)l2m = UINT16_MAX;
9975                         *(uint16_t *)l2v = RTE_BE16(RTE_ETHER_TYPE_ECPRI);
9976                 }
9977         }
9978         if (!ecpri_v)
9979                 return;
9980         if (!ecpri_m)
9981                 ecpri_m = &rte_flow_item_ecpri_mask;
9982         /*
9983          * At most four DW samples are supported in a single matcher now.
9984          * Two are used for eCPRI matching:
9985          * 1. Type: one byte, mask should be 0x00ff0000 in network order
9986          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
9987          *    if any.
9988          */
9989         if (!ecpri_m->hdr.common.u32)
9990                 return;
9991         samples = priv->sh->ecpri_parser.ids;
9992         /* Need to take the whole DW as the mask to fill the entry. */
9993         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9994                             prog_sample_field_value_0);
9995         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9996                             prog_sample_field_value_0);
9997         /* Already big endian (network order) in the header. */
9998         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
9999         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
10000         /* Sample#0, used for matching type, offset 0. */
10001         MLX5_SET(fte_match_set_misc4, misc4_m,
10002                  prog_sample_field_id_0, samples[0]);
10003         /* It makes no sense to set the sample ID in the mask field. */
10004         MLX5_SET(fte_match_set_misc4, misc4_v,
10005                  prog_sample_field_id_0, samples[0]);
10006         /*
10007          * Check whether the message body part needs to be matched.
10008          * Wildcard rules matching only the type field should be supported.
10009          */
10010         if (ecpri_m->hdr.dummy[0]) {
10011                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
10012                 switch (common.type) {
10013                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
10014                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
10015                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
10016                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10017                                             prog_sample_field_value_1);
10018                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10019                                             prog_sample_field_value_1);
10020                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
10021                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
10022                                             ecpri_m->hdr.dummy[0];
10023                         /* Sample#1, to match message body, offset 4. */
10024                         MLX5_SET(fte_match_set_misc4, misc4_m,
10025                                  prog_sample_field_id_1, samples[1]);
10026                         MLX5_SET(fte_match_set_misc4, misc4_v,
10027                                  prog_sample_field_id_1, samples[1]);
10028                         break;
10029                 default:
10030                         /* Others, do not match any sample ID. */
10031                         break;
10032                 }
10033         }
10034 }
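
/*
 * Usage sketch (message type assumed): matching eCPRI IQ data messages
 * (type 0); the common header dword is compared through programmable
 * sample field 0 filled above, so its mask must be non-zero.
 */
static const struct rte_flow_item_ecpri ecpri_spec_example __rte_unused = {
	.hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA,
};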
10035
10036 /**
10037  * Add connection tracking status item to matcher
10038  *
10039  * @param[in] dev
10040  *   The device to configure through.
10041  * @param[in, out] matcher
10042  *   Flow matcher.
10043  * @param[in, out] key
10044  *   Flow matcher value.
10045  * @param[in] item
10046  *   Flow pattern to translate.
10047  */
10048 static void
10049 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
10050                               void *matcher, void *key,
10051                               const struct rte_flow_item *item)
10052 {
10053         uint32_t reg_value = 0;
10054         int reg_id;
10055         /* The 8 LSBs are 0b11000011; the middle 4 bits are reserved. */
10056         uint32_t reg_mask = 0;
10057         const struct rte_flow_item_conntrack *spec = item->spec;
10058         const struct rte_flow_item_conntrack *mask = item->mask;
10059         uint32_t flags;
10060         struct rte_flow_error error;
10061
10062         if (!mask)
10063                 mask = &rte_flow_item_conntrack_mask;
10064         if (!spec || !mask->flags)
10065                 return;
10066         flags = spec->flags & mask->flags;
10067         /* The conflict should be checked in the validation. */
10068         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
10069                 reg_value |= MLX5_CT_SYNDROME_VALID;
10070         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10071                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
10072         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
10073                 reg_value |= MLX5_CT_SYNDROME_INVALID;
10074         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
10075                 reg_value |= MLX5_CT_SYNDROME_TRAP;
10076         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10077                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
10078         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
10079                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
10080                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
10081                 reg_mask |= 0xc0;
10082         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10083                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
10084         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10085                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
10086         /* The REG_C_x value could be saved during startup. */
10087         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
10088         if (reg_id == REG_NON)
10089                 return;
10090         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
10091                                reg_value, reg_mask);
10092 }
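
/*
 * Usage sketch (flags assumed): an application matching only packets
 * that connection tracking classified as valid; the flag is translated
 * to the syndrome bits in the REG_C_x register above.
 */
static const struct rte_flow_item_conntrack ct_spec_example __rte_unused = {
	.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
};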
10093
10094 static void
10095 flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
10096                             const struct rte_flow_item *item,
10097                             struct mlx5_flow *dev_flow, bool is_inner)
10098 {
10099         const struct rte_flow_item_flex *spec =
10100                 (const struct rte_flow_item_flex *)item->spec;
10101         int index = mlx5_flex_acquire_index(dev, spec->handle, false);
10102
10103         MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
10104         if (index < 0)
10105                 return;
10106         if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
10107                 /* Don't count both inner and outer flex items in one rule. */
10108                 if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
10109                         MLX5_ASSERT(false);
10110                 dev_flow->handle->flex_item |= RTE_BIT32(index);
10111         }
10112         mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
10113 }
10114
10115 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
10116
10117 #define HEADER_IS_ZERO(match_criteria, headers)                              \
10118         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
10119                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
10120
10121 /**
10122  * Calculate flow matcher enable bitmap.
10123  *
10124  * @param match_criteria
10125  *   Pointer to flow matcher criteria.
10126  *
10127  * @return
10128  *   Bitmap of enabled fields.
10129  */
10130 static uint8_t
10131 flow_dv_matcher_enable(uint32_t *match_criteria)
10132 {
10133         uint8_t match_criteria_enable;
10134
10135         match_criteria_enable =
10136                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
10137                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
10138         match_criteria_enable |=
10139                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
10140                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
10141         match_criteria_enable |=
10142                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
10143                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
10144         match_criteria_enable |=
10145                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
10146                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
10147         match_criteria_enable |=
10148                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
10149                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
10150         match_criteria_enable |=
10151                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
10152                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
10153         match_criteria_enable |=
10154                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10155                 MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10156         return match_criteria_enable;
10157 }
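
/*
 * A sketch of the resulting bitmap (names from mlx5_prm.h): a matcher
 * whose mask touches only outer L3/L4 headers plus one metadata
 * register would report the outer and misc2 criteria bits set.
 */
static __rte_unused uint8_t
flow_dv_matcher_enable_example(void)
{
	return (1u << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
	       (1u << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
}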
10158
10159 static void
10160 __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10161 {
10162         /*
10163          * Check flow matching criteria first, subtract misc5/4 length if flow
10164          * doesn't own misc5/4 parameters. In some old rdma-core releases,
10165          * misc5/4 are not supported, and matcher creation failure is expected
10166          * w/o subtraction. If misc5 is provided, misc4 must be counted in since
10167          * misc5 is right after misc4.
10168          */
10169         if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10170                 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10171                         MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10172                 if (!(match_criteria & (1 <<
10173                         MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10174                         *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10175                 }
10176         }
10177 }
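
/*
 * Worked example (a sketch): with neither misc4 nor misc5 owned by the
 * flow, the parameter buffer passed to older rdma-core shrinks by both
 * trailing sections; if misc5 is owned, nothing is trimmed because
 * misc4 must stay in place right before it.
 */
static __rte_unused size_t
flow_dv_adjusted_size_example(uint8_t match_criteria)
{
	size_t size = MLX5_ST_SZ_BYTES(fte_match_param);

	__flow_dv_adjust_buf_size(&size, match_criteria);
	return size;
}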
10178
10179 static struct mlx5_list_entry *
10180 flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
10181                          struct mlx5_list_entry *entry, void *cb_ctx)
10182 {
10183         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10184         struct mlx5_flow_dv_matcher *ref = ctx->data;
10185         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10186                                                             typeof(*tbl), tbl);
10187         struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10188                                                             sizeof(*resource),
10189                                                             0, SOCKET_ID_ANY);
10190
10191         if (!resource) {
10192                 rte_flow_error_set(ctx->error, ENOMEM,
10193                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10194                                    "cannot create matcher");
10195                 return NULL;
10196         }
10197         memcpy(resource, entry, sizeof(*resource));
10198         resource->tbl = &tbl->tbl;
10199         return &resource->entry;
10200 }
10201
10202 static void
10203 flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
10204                              struct mlx5_list_entry *entry)
10205 {
10206         mlx5_free(entry);
10207 }
10208
10209 struct mlx5_list_entry *
10210 flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
10211 {
10212         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10213         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10214         struct rte_eth_dev *dev = ctx->dev;
10215         struct mlx5_flow_tbl_data_entry *tbl_data;
10216         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
10217         struct rte_flow_error *error = ctx->error;
10218         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10219         struct mlx5_flow_tbl_resource *tbl;
10220         void *domain;
10221         uint32_t idx = 0;
10222         int ret;
10223
10224         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10225         if (!tbl_data) {
10226                 rte_flow_error_set(error, ENOMEM,
10227                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10228                                    NULL,
10229                                    "cannot allocate flow table data entry");
10230                 return NULL;
10231         }
10232         tbl_data->idx = idx;
10233         tbl_data->tunnel = tt_prm->tunnel;
10234         tbl_data->group_id = tt_prm->group_id;
10235         tbl_data->external = !!tt_prm->external;
10236         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
10237         tbl_data->is_egress = !!key.is_egress;
10238         tbl_data->is_transfer = !!key.is_fdb;
10239         tbl_data->dummy = !!key.dummy;
10240         tbl_data->level = key.level;
10241         tbl_data->id = key.id;
10242         tbl = &tbl_data->tbl;
10243         if (key.dummy)
10244                 return &tbl_data->entry;
10245         if (key.is_fdb)
10246                 domain = sh->fdb_domain;
10247         else if (key.is_egress)
10248                 domain = sh->tx_domain;
10249         else
10250                 domain = sh->rx_domain;
10251         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
10252         if (ret) {
10253                 rte_flow_error_set(error, ENOMEM,
10254                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10255                                    NULL, "cannot create flow table object");
10256                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10257                 return NULL;
10258         }
10259         if (key.level != 0) {
10260                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
10261                                         (tbl->obj, &tbl_data->jump.action);
10262                 if (ret) {
10263                         rte_flow_error_set(error, ENOMEM,
10264                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10265                                            NULL,
10266                                            "cannot create flow jump action");
10267                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10268                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10269                         return NULL;
10270                 }
10271         }
10272         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
10273               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
10274               key.level, key.id);
10275         tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
10276                                               flow_dv_matcher_create_cb,
10277                                               flow_dv_matcher_match_cb,
10278                                               flow_dv_matcher_remove_cb,
10279                                               flow_dv_matcher_clone_cb,
10280                                               flow_dv_matcher_clone_free_cb);
10281         if (!tbl_data->matchers) {
10282                 rte_flow_error_set(error, ENOMEM,
10283                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10284                                    NULL,
10285                                    "cannot create tbl matcher list");
10286                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10287                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10288                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10289                 return NULL;
10290         }
10291         return &tbl_data->entry;
10292 }
10293
10294 int
10295 flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10296                      void *cb_ctx)
10297 {
10298         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10299         struct mlx5_flow_tbl_data_entry *tbl_data =
10300                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10301         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10302
10303         return tbl_data->level != key.level ||
10304                tbl_data->id != key.id ||
10305                tbl_data->dummy != key.dummy ||
10306                tbl_data->is_transfer != !!key.is_fdb ||
10307                tbl_data->is_egress != !!key.is_egress;
10308 }
10309
10310 struct mlx5_list_entry *
10311 flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10312                       void *cb_ctx)
10313 {
10314         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10315         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10316         struct mlx5_flow_tbl_data_entry *tbl_data;
10317         struct rte_flow_error *error = ctx->error;
10318         uint32_t idx = 0;
10319
10320         tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10321         if (!tbl_data) {
10322                 rte_flow_error_set(error, ENOMEM,
10323                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10324                                    NULL,
10325                                    "cannot allocate flow table data entry");
10326                 return NULL;
10327         }
10328         memcpy(tbl_data, oentry, sizeof(*tbl_data));
10329         tbl_data->idx = idx;
10330         return &tbl_data->entry;
10331 }
10332
10333 void
10334 flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10335 {
10336         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10337         struct mlx5_flow_tbl_data_entry *tbl_data =
10338                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10339
10340         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10341 }
10342
10343 /**
10344  * Get a flow table.
10345  *
10346  * @param[in, out] dev
10347  *   Pointer to rte_eth_dev structure.
10348  * @param[in] table_level
10349  *   Table level to use.
10350  * @param[in] egress
10351  *   Direction of the table.
 * @param[in] transfer
 *   E-Switch or NIC flow.
 * @param[in] external
 *   True if the table is used for user-created flows.
 * @param[in] tunnel
 *   Tunnel offload context, NULL if not used.
 * @param[in] group_id
 *   Original (tunnel offload) group id.
10354  * @param[in] dummy
10355  *   Dummy entry for dv API.
10356  * @param[in] table_id
10357  *   Table id to use.
10358  * @param[out] error
10359  *   pointer to error structure.
10360  *
10361  * @return
10362  *   Returns the table resource based on the key, NULL in case of failure.
10363  */
10364 struct mlx5_flow_tbl_resource *
10365 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10366                          uint32_t table_level, uint8_t egress,
10367                          uint8_t transfer,
10368                          bool external,
10369                          const struct mlx5_flow_tunnel *tunnel,
10370                          uint32_t group_id, uint8_t dummy,
10371                          uint32_t table_id,
10372                          struct rte_flow_error *error)
10373 {
10374         struct mlx5_priv *priv = dev->data->dev_private;
10375         union mlx5_flow_tbl_key table_key = {
10376                 {
10377                         .level = table_level,
10378                         .id = table_id,
10379                         .reserved = 0,
10380                         .dummy = !!dummy,
10381                         .is_fdb = !!transfer,
10382                         .is_egress = !!egress,
10383                 }
10384         };
10385         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10386                 .tunnel = tunnel,
10387                 .group_id = group_id,
10388                 .external = external,
10389         };
10390         struct mlx5_flow_cb_ctx ctx = {
10391                 .dev = dev,
10392                 .error = error,
10393                 .data = &table_key.v64,
10394                 .data2 = &tt_prm,
10395         };
10396         struct mlx5_list_entry *entry;
10397         struct mlx5_flow_tbl_data_entry *tbl_data;
10398
10399         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10400         if (!entry) {
10401                 rte_flow_error_set(error, ENOMEM,
10402                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10403                                    "cannot get table");
10404                 return NULL;
10405         }
10406         DRV_LOG(DEBUG, "table_level %u table_id %u "
10407                 "tunnel %u group %u registered.",
10408                 table_level, table_id,
10409                 tunnel ? tunnel->tunnel_id : 0, group_id);
10410         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10411         return &tbl_data->tbl;
10412 }
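
/*
 * Usage sketch (arguments assumed): taking and dropping a reference on
 * a NIC RX table of level 1. The hash-list entry is reference counted,
 * so the release only destroys the table object on the last reference.
 */
static __rte_unused int
flow_dv_tbl_get_put_example(struct rte_eth_dev *dev,
			    struct rte_flow_error *error)
{
	struct mlx5_flow_tbl_resource *tbl;

	tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL,
				       0, 0, 0, error);
	if (!tbl)
		return -rte_errno;
	return flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
}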
10413
10414 void
10415 flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10416 {
10417         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10418         struct mlx5_flow_tbl_data_entry *tbl_data =
10419                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10420
10421         MLX5_ASSERT(entry && sh);
10422         if (tbl_data->jump.action)
10423                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10424         if (tbl_data->tbl.obj)
10425                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10426         if (tbl_data->tunnel_offload && tbl_data->external) {
10427                 struct mlx5_list_entry *he;
10428                 struct mlx5_hlist *tunnel_grp_hash;
10429                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10430                 union tunnel_tbl_key tunnel_key = {
10431                         .tunnel_id = tbl_data->tunnel ?
10432                                         tbl_data->tunnel->tunnel_id : 0,
10433                         .group = tbl_data->group_id
10434                 };
10435                 uint32_t table_level = tbl_data->level;
10436                 struct mlx5_flow_cb_ctx ctx = {
10437                         .data = (void *)&tunnel_key.val,
10438                 };
10439
10440                 tunnel_grp_hash = tbl_data->tunnel ?
10441                                         tbl_data->tunnel->groups :
10442                                         thub->groups;
10443                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
10444                 if (he)
10445                         mlx5_hlist_unregister(tunnel_grp_hash, he);
10446                 DRV_LOG(DEBUG,
10447                         "table_level %u id %u tunnel %u group %u released.",
10448                         table_level,
10449                         tbl_data->id,
10450                         tbl_data->tunnel ?
10451                         tbl_data->tunnel->tunnel_id : 0,
10452                         tbl_data->group_id);
10453         }
10454         mlx5_list_destroy(tbl_data->matchers);
10455         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10456 }
10457
10458 /**
10459  * Release a flow table.
10460  *
10461  * @param[in] sh
10462  *   Pointer to device shared structure.
10463  * @param[in] tbl
10464  *   Table resource to be released.
10465  *
10466  * @return
10467  *   Returns 0 if the table was released, 1 otherwise.
10468  */
10469 static int
10470 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10471                              struct mlx5_flow_tbl_resource *tbl)
10472 {
10473         struct mlx5_flow_tbl_data_entry *tbl_data =
10474                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10475
10476         if (!tbl)
10477                 return 0;
10478         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10479 }
10480
10481 int
10482 flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
10483                          struct mlx5_list_entry *entry, void *cb_ctx)
10484 {
10485         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10486         struct mlx5_flow_dv_matcher *ref = ctx->data;
10487         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10488                                                         entry);
10489
10490         return cur->crc != ref->crc ||
10491                cur->priority != ref->priority ||
10492                memcmp((const void *)cur->mask.buf,
10493                       (const void *)ref->mask.buf, ref->mask.size);
10494 }
10495
10496 struct mlx5_list_entry *
10497 flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
10498 {
10499         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10500         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10501         struct mlx5_flow_dv_matcher *ref = ctx->data;
10502         struct mlx5_flow_dv_matcher *resource;
10503         struct mlx5dv_flow_matcher_attr dv_attr = {
10504                 .type = IBV_FLOW_ATTR_NORMAL,
10505                 .match_mask = (void *)&ref->mask,
10506         };
10507         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10508                                                             typeof(*tbl), tbl);
10509         int ret;
10510
10511         resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
10512                                SOCKET_ID_ANY);
10513         if (!resource) {
10514                 rte_flow_error_set(ctx->error, ENOMEM,
10515                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10516                                    "cannot create matcher");
10517                 return NULL;
10518         }
10519         *resource = *ref;
10520         dv_attr.match_criteria_enable =
10521                 flow_dv_matcher_enable(resource->mask.buf);
10522         __flow_dv_adjust_buf_size(&ref->mask.size,
10523                                   dv_attr.match_criteria_enable);
10524         dv_attr.priority = ref->priority;
10525         if (tbl->is_egress)
10526                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10527         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
10528                                                tbl->tbl.obj,
10529                                                &resource->matcher_object);
10530         if (ret) {
10531                 mlx5_free(resource);
10532                 rte_flow_error_set(ctx->error, ENOMEM,
10533                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10534                                    "cannot create matcher");
10535                 return NULL;
10536         }
10537         return &resource->entry;
10538 }
10539
10540 /**
10541  * Register the flow matcher.
10542  *
10543  * @param[in, out] dev
10544  *   Pointer to rte_eth_dev structure.
10545  * @param[in, out] matcher
10546  *   Pointer to flow matcher.
10547  * @param[in, out] key
10548  *   Pointer to flow table key.
10549  * @parm[in, out] dev_flow
10550  *   Pointer to the dev_flow.
10551  * @param[out] error
10552  *   pointer to error structure.
10553  *
10554  * @return
10555  *   0 on success otherwise -errno and errno is set.
10556  */
10557 static int
10558 flow_dv_matcher_register(struct rte_eth_dev *dev,
10559                          struct mlx5_flow_dv_matcher *ref,
10560                          union mlx5_flow_tbl_key *key,
10561                          struct mlx5_flow *dev_flow,
10562                          const struct mlx5_flow_tunnel *tunnel,
10563                          uint32_t group_id,
10564                          struct rte_flow_error *error)
10565 {
10566         struct mlx5_list_entry *entry;
10567         struct mlx5_flow_dv_matcher *resource;
10568         struct mlx5_flow_tbl_resource *tbl;
10569         struct mlx5_flow_tbl_data_entry *tbl_data;
10570         struct mlx5_flow_cb_ctx ctx = {
10571                 .error = error,
10572                 .data = ref,
10573         };
10574         /*
10575          * Tunnel offload API requires this registration for cases when
10576          * a tunnel match rule was inserted before the tunnel set rule.
10577          */
10578         tbl = flow_dv_tbl_resource_get(dev, key->level,
10579                                        key->is_egress, key->is_fdb,
10580                                        dev_flow->external, tunnel,
10581                                        group_id, 0, key->id, error);
10582         if (!tbl)
10583                 return -rte_errno;      /* No need to refill the error info */
10584         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10585         ref->tbl = tbl;
10586         entry = mlx5_list_register(tbl_data->matchers, &ctx);
10587         if (!entry) {
10588                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10589                 return rte_flow_error_set(error, ENOMEM,
10590                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10591                                           "cannot allocate ref memory");
10592         }
10593         resource = container_of(entry, typeof(*resource), entry);
10594         dev_flow->handle->dvh.matcher = resource;
10595         return 0;
10596 }
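
      /*
       * Illustrative sketch (not part of the driver): how a translate-stage
       * caller is expected to use the registration above. The matcher
       * reference carries the mask buffer and priority; registration either
       * reuses a matcher already attached to the table or creates a new one
       * through the create callback. Variables such as table_level, attr,
       * tunnel and group_id are hypothetical here.
       *
       *     struct mlx5_flow_dv_matcher ref = { .priority = priority };
       *     union mlx5_flow_tbl_key key = {
       *             .level = table_level,
       *             .is_fdb = !!attr->transfer,
       *             .is_egress = !!attr->egress,
       *     };
       *
       *     ref.mask.size = sizeof(ref.mask.buf);
       *     // ... fill ref.mask.buf from the pattern items ...
       *     if (flow_dv_matcher_register(dev, &ref, &key, dev_flow,
       *                                  tunnel, group_id, error))
       *             return -rte_errno;
       *     // dev_flow->handle->dvh.matcher now points to the shared matcher.
       */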
10597
10598 struct mlx5_list_entry *
10599 flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
10600 {
10601         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10602         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10603         struct mlx5_flow_dv_tag_resource *entry;
10604         uint32_t idx = 0;
10605         int ret;
10606
10607         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10608         if (!entry) {
10609                 rte_flow_error_set(ctx->error, ENOMEM,
10610                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10611                                    "cannot allocate resource memory");
10612                 return NULL;
10613         }
10614         entry->idx = idx;
10615         entry->tag_id = *(uint32_t *)(ctx->data);
10616         ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
10617                                                   &entry->action);
10618         if (ret) {
10619                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10620                 rte_flow_error_set(ctx->error, ENOMEM,
10621                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10622                                    NULL, "cannot create action");
10623                 return NULL;
10624         }
10625         return &entry->entry;
10626 }
10627
10628 int
10629 flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10630                      void *cb_ctx)
10631 {
10632         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10633         struct mlx5_flow_dv_tag_resource *tag =
10634                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10635
10636         return *(uint32_t *)(ctx->data) != tag->tag_id;
10637 }
10638
10639 struct mlx5_list_entry *
10640 flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10641                      void *cb_ctx)
10642 {
10643         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10644         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10645         struct mlx5_flow_dv_tag_resource *entry;
10646         uint32_t idx = 0;
10647
10648         entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10649         if (!entry) {
10650                 rte_flow_error_set(ctx->error, ENOMEM,
10651                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10652                                    "cannot allocate tag resource memory");
10653                 return NULL;
10654         }
10655         memcpy(entry, oentry, sizeof(*entry));
10656         entry->idx = idx;
10657         return &entry->entry;
10658 }
10659
10660 void
10661 flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10662 {
10663         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10664         struct mlx5_flow_dv_tag_resource *tag =
10665                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10666
10667         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10668 }
10669
10670 /**
10671  * Find existing tag resource or create and register a new one.
10672  *
10673  * @param[in, out] dev
10674  *   Pointer to rte_eth_dev structure.
10675  * @param[in] tag_be24
10676  *   Tag value in big-endian, right-shifted by 8 bits (24-bit value).
10677  * @param[in, out] dev_flow
10678  *   Pointer to the dev_flow.
10679  * @param[out] error
10680  *   Pointer to error structure.
10681  *
10682  * @return
10683  *   0 on success, a negative errno value otherwise and rte_errno is set.
10684  */
10685 static int
10686 flow_dv_tag_resource_register
10687                         (struct rte_eth_dev *dev,
10688                          uint32_t tag_be24,
10689                          struct mlx5_flow *dev_flow,
10690                          struct rte_flow_error *error)
10691 {
10692         struct mlx5_priv *priv = dev->data->dev_private;
10693         struct mlx5_flow_dv_tag_resource *resource;
10694         struct mlx5_list_entry *entry;
10695         struct mlx5_flow_cb_ctx ctx = {
10696                                         .error = error,
10697                                         .data = &tag_be24,
10698                                         };
10699         struct mlx5_hlist *tag_table;
10700
10701         tag_table = flow_dv_hlist_prepare(priv->sh, &priv->sh->tag_table,
10702                                       "tags",
10703                                       MLX5_TAGS_HLIST_ARRAY_SIZE,
10704                                       false, false, priv->sh,
10705                                       flow_dv_tag_create_cb,
10706                                       flow_dv_tag_match_cb,
10707                                       flow_dv_tag_remove_cb,
10708                                       flow_dv_tag_clone_cb,
10709                                       flow_dv_tag_clone_free_cb);
10710         if (unlikely(!tag_table))
10711                 return -rte_errno;
10712         entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
10713         if (entry) {
10714                 resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
10715                                         entry);
10716                 dev_flow->handle->dvh.rix_tag = resource->idx;
10717                 dev_flow->dv.tag_resource = resource;
10718                 return 0;
10719         }
10720         return -rte_errno;
10721 }
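
      /*
       * Illustrative sketch (not part of the driver): two flows using the
       * same MARK id end up sharing one tag action. The hash list keyed by
       * the tag value returns the existing entry and only bumps its
       * reference count, so a single mlx5dv tag action object exists per
       * distinct value. This mirrors the MARK handling in the sample
       * translation below; mark_id and the dev_flow variables are
       * hypothetical.
       *
       *     uint32_t tag_be = mlx5_flow_mark_set(mark_id);
       *
       *     if (flow_dv_tag_resource_register(dev, tag_be, dev_flow_a, error))
       *             return -rte_errno;
       *     if (flow_dv_tag_resource_register(dev, tag_be, dev_flow_b, error))
       *             return -rte_errno;
       *     // dev_flow_a->dv.tag_resource == dev_flow_b->dv.tag_resource
       */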
10722
10723 void
10724 flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10725 {
10726         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10727         struct mlx5_flow_dv_tag_resource *tag =
10728                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10729
10730         MLX5_ASSERT(tag && sh && tag->action);
10731         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10732         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10733         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10734 }
10735
10736 /**
10737  * Release the tag.
10738  *
10739  * @param dev
10740  *   Pointer to Ethernet device.
10741  * @param tag_idx
10742  *   Tag index.
10743  *
10744  * @return
10745  *   1 while a reference on it exists, 0 when freed.
10746  */
10747 static int
10748 flow_dv_tag_release(struct rte_eth_dev *dev,
10749                     uint32_t tag_idx)
10750 {
10751         struct mlx5_priv *priv = dev->data->dev_private;
10752         struct mlx5_flow_dv_tag_resource *tag;
10753
10754         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10755         if (!tag)
10756                 return 0;
10757         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10758                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10759         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10760 }
10761
10762 /**
10763  * Translate action PORT_ID / REPRESENTED_PORT to vport.
10764  *
10765  * @param[in] dev
10766  *   Pointer to rte_eth_dev structure.
10767  * @param[in] action
10768  *   Pointer to action PORT_ID / REPRESENTED_PORT.
10769  * @param[out] dst_port_id
10770  *   The target port ID.
10771  * @param[out] error
10772  *   Pointer to the error structure.
10773  *
10774  * @return
10775  *   0 on success, a negative errno value otherwise and rte_errno is set.
10776  */
10777 static int
10778 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10779                                  const struct rte_flow_action *action,
10780                                  uint32_t *dst_port_id,
10781                                  struct rte_flow_error *error)
10782 {
10783         uint32_t port;
10784         struct mlx5_priv *priv;
10785
10786         switch (action->type) {
10787         case RTE_FLOW_ACTION_TYPE_PORT_ID: {
10788                 const struct rte_flow_action_port_id *conf;
10789
10790                 conf = (const struct rte_flow_action_port_id *)action->conf;
10791                 port = conf->original ? dev->data->port_id : conf->id;
10792                 break;
10793         }
10794         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
10795                 const struct rte_flow_action_ethdev *ethdev;
10796
10797                 ethdev = (const struct rte_flow_action_ethdev *)action->conf;
10798                 port = ethdev->port_id;
10799                 break;
10800         }
10801         default:
10802                 MLX5_ASSERT(false);
10803                 return rte_flow_error_set(error, EINVAL,
10804                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
10805                                           "unknown E-Switch action");
10806         }
10807
10808         priv = mlx5_port_to_eswitch_info(port, false);
10809         if (!priv)
10810                 return rte_flow_error_set(error, rte_errno,
10811                                           RTE_FLOW_ERROR_TYPE_ACTION,
10812                                           NULL,
10813                                           "No eswitch info was found for port");
10814 #ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
10815         /*
10816          * This parameter is transferred to
10817          * mlx5dv_dr_action_create_dest_ib_port().
10818          */
10819         *dst_port_id = priv->dev_port;
10820 #else
10821         /*
10822          * Legacy mode, no LAG configuration is supported.
10823          * This parameter is transferred to
10824          * mlx5dv_dr_action_create_dest_vport().
10825          */
10826         *dst_port_id = priv->vport_id;
10827 #endif
10828         return 0;
10829 }
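
      /*
       * Illustrative sketch (not part of the driver): the two action
       * encodings accepted above. PORT_ID carries its own configuration
       * structure while REPRESENTED_PORT reuses the generic ethdev one;
       * both resolve to the same vport (or IB port) destination. The
       * structures and enum values are public rte_flow API; peer_port_id
       * and the variable names are hypothetical.
       *
       *     struct rte_flow_action_port_id pid = { .id = peer_port_id };
       *     struct rte_flow_action_ethdev edev = { .port_id = peer_port_id };
       *     struct rte_flow_action act_a = {
       *             .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid };
       *     struct rte_flow_action act_b = {
       *             .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
       *             .conf = &edev };
       *     uint32_t dst_a, dst_b;
       *
       *     flow_dv_translate_action_port_id(dev, &act_a, &dst_a, error);
       *     flow_dv_translate_action_port_id(dev, &act_b, &dst_b, error);
       *     // dst_a == dst_b when both refer to the same ethdev port.
       */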
10830
10831 /**
10832  * Create a counter with aging configuration.
10833  *
10834  * @param[in] dev
10835  *   Pointer to rte_eth_dev structure.
10836  * @param[in] dev_flow
10837  *   Pointer to the mlx5_flow.
10838  * @param[in] count
10839  *   Pointer to the counter action configuration.
10840  * @param[in] age
10841  *   Pointer to the aging action configuration.
10842  *
10843  * @return
10844  *   Index to flow counter on success, 0 otherwise.
10845  */
10846 static uint32_t
10847 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10848                                 struct mlx5_flow *dev_flow,
10849                                 const struct rte_flow_action_count *count
10850                                         __rte_unused,
10851                                 const struct rte_flow_action_age *age)
10852 {
10853         uint32_t counter;
10854         struct mlx5_age_param *age_param;
10855
10856         counter = flow_dv_counter_alloc(dev, !!age);
10857         if (!counter || age == NULL)
10858                 return counter;
10859         age_param = flow_dv_counter_idx_get_age(dev, counter);
10860         age_param->context = age->context ? age->context :
10861                 (void *)(uintptr_t)(dev_flow->flow_idx);
10862         age_param->timeout = age->timeout;
10863         age_param->port_id = dev->data->port_id;
10864         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10865         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10866         return counter;
10867 }
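
      /*
       * Illustrative sketch (not part of the driver): allocating an aged
       * counter during translation. A non-NULL age configuration selects
       * the age-enabled counter pools and arms the age parameter; with
       * age == NULL a plain counter index is returned. The count argument
       * is unused here, so NULL is acceptable; the error handling is an
       * assumption of this sketch.
       *
       *     const struct rte_flow_action_age age = {
       *             .timeout = 10,          // seconds
       *             .context = NULL,        // fall back to the flow index
       *     };
       *     uint32_t cnt_idx;
       *
       *     cnt_idx = flow_dv_translate_create_counter(dev, dev_flow,
       *                                                NULL, &age);
       *     if (!cnt_idx)
       *             return -rte_errno;
       */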
10868
10869 /**
10870  * Add Tx queue matcher.
10871  *
10872  * @param[in] dev
10873  *   Pointer to the dev struct.
10874  * @param[in, out] matcher
10875  *   Flow matcher.
10876  * @param[in, out] key
10877  *   Flow matcher value.
10878  * @param[in] item
10879  *   Flow pattern to translate.
10882  */
10883 static void
10884 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10885                                 void *matcher, void *key,
10886                                 const struct rte_flow_item *item)
10887 {
10888         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10889         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10890         void *misc_m =
10891                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10892         void *misc_v =
10893                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10894         struct mlx5_txq_ctrl *txq;
10895         uint32_t queue, mask;
10896
10897         queue_m = (const void *)item->mask;
10898         queue_v = (const void *)item->spec;
10899         if (!queue_v)
10900                 return;
10901         txq = mlx5_txq_get(dev, queue_v->queue);
10902         if (!txq)
10903                 return;
10904         if (txq->type == MLX5_TXQ_TYPE_HAIRPIN)
10905                 queue = txq->obj->sq->id;
10906         else
10907                 queue = txq->obj->sq_obj.sq->id;
10908         mask = queue_m == NULL ? UINT32_MAX : queue_m->queue;
10909         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, mask);
10910         MLX5_SET(fte_match_set_misc, misc_v, source_sqn, queue & mask);
10911         mlx5_txq_release(dev, queue_v->queue);
10912 }
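
      /*
       * Illustrative sketch (not part of the driver): this matcher consumes
       * the PMD-internal TX_QUEUE item, which the PMD itself prepends to
       * Tx-side rules (e.g. hairpin and metadata copy flows); it is not a
       * public rte_flow item. The SQ number of the given Tx queue is
       * matched against source_sqn; txq_index is hypothetical.
       *
       *     struct mlx5_rte_flow_item_tx_queue queue_spec = {
       *             .queue = txq_index,
       *     };
       *     struct rte_flow_item item = {
       *             .type = (enum rte_flow_item_type)
       *                     MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
       *             .spec = &queue_spec,
       *             .mask = NULL,   // NULL mask matches the full SQ id.
       *     };
       */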
10913
10914 /**
10915  * Set the hash fields according to the @p flow information.
10916  *
10917  * @param[in] dev_flow
10918  *   Pointer to the mlx5_flow.
10919  * @param[in] rss_desc
10920  *   Pointer to the mlx5_flow_rss_desc.
10921  */
10922 static void
10923 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10924                        struct mlx5_flow_rss_desc *rss_desc)
10925 {
10926         uint64_t items = dev_flow->handle->layers;
10927         int rss_inner = 0;
10928         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10929
10930         dev_flow->hash_fields = 0;
10931 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10932         if (rss_desc->level >= 2)
10933                 rss_inner = 1;
10934 #endif
10935         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10936             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10937                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10938                         if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
10939                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10940                         else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
10941                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10942                         else
10943                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10944                 }
10945         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10946                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10947                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10948                         if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
10949                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10950                         else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
10951                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10952                         else
10953                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10954                 }
10955         }
10956         if (dev_flow->hash_fields == 0)
10957                 /*
10958                  * There is no match between the RSS types and the
10959                  * L3 protocol (IPv4/IPv6) defined in the flow rule.
10960                  */
10961                 return;
10962         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10963             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10964                 if (rss_types & RTE_ETH_RSS_UDP) {
10965                         if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
10966                                 dev_flow->hash_fields |=
10967                                                 IBV_RX_HASH_SRC_PORT_UDP;
10968                         else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
10969                                 dev_flow->hash_fields |=
10970                                                 IBV_RX_HASH_DST_PORT_UDP;
10971                         else
10972                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10973                 }
10974         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10975                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10976                 if (rss_types & RTE_ETH_RSS_TCP) {
10977                         if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
10978                                 dev_flow->hash_fields |=
10979                                                 IBV_RX_HASH_SRC_PORT_TCP;
10980                         else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
10981                                 dev_flow->hash_fields |=
10982                                                 IBV_RX_HASH_DST_PORT_TCP;
10983                         else
10984                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10985                 }
10986         }
10987         if (rss_inner)
10988                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10989 }
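
      /*
       * Worked example (illustrative): for a flow whose detected layers are
       * outer IPv4 + UDP, rss_desc->types == (RTE_ETH_RSS_UDP |
       * RTE_ETH_RSS_L3_SRC_ONLY) and rss_desc->level < 2 yield
       *
       *     hash_fields = IBV_RX_HASH_SRC_IPV4 |
       *                   IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP;
       *
       * the L3 part is narrowed to the source address only, while the UDP
       * part keeps both ports because neither L4_SRC_ONLY nor L4_DST_ONLY
       * is set. With level >= 2 the same selection applies to the inner
       * headers and IBV_RX_HASH_INNER is added on top.
       */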
10990
10991 /**
10992  * Prepare an Rx Hash queue.
10993  *
10994  * @param dev
10995  *   Pointer to Ethernet device.
10996  * @param[in] dev_flow
10997  *   Pointer to the mlx5_flow.
10998  * @param[in] rss_desc
10999  *   Pointer to the mlx5_flow_rss_desc.
11000  * @param[out] hrxq_idx
11001  *   Hash Rx queue index.
11002  *
11003  * @return
11004  *   Pointer to the hash Rx queue on success, NULL otherwise and rte_errno is set.
11005  */
11006 static struct mlx5_hrxq *
11007 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
11008                      struct mlx5_flow *dev_flow,
11009                      struct mlx5_flow_rss_desc *rss_desc,
11010                      uint32_t *hrxq_idx)
11011 {
11012         struct mlx5_priv *priv = dev->data->dev_private;
11013         struct mlx5_flow_handle *dh = dev_flow->handle;
11014         struct mlx5_hrxq *hrxq;
11015
11016         MLX5_ASSERT(rss_desc->queue_num);
11017         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
11018         rss_desc->hash_fields = dev_flow->hash_fields;
11019         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
11020         rss_desc->shared_rss = 0;
11021         if (rss_desc->hash_fields == 0)
11022                 rss_desc->queue_num = 1;
11023         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
11024         if (!*hrxq_idx)
11025                 return NULL;
11026         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
11027                               *hrxq_idx);
11028         return hrxq;
11029 }
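
      /*
       * Illustrative sketch (not part of the driver): this is how the
       * sample translation below materializes a QUEUE fate. A single queue
       * is expressed as a one-entry RSS table (and a zero hash-fields
       * selection forces queue_num back to 1), so the same hash Rx queue
       * cache serves both QUEUE and RSS fates. queue_index is hypothetical.
       *
       *     uint32_t hrxq_idx;
       *     struct mlx5_hrxq *hrxq;
       *
       *     rss_desc->queue_num = 1;
       *     rss_desc->queue[0] = queue_index;
       *     hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, &hrxq_idx);
       *     if (!hrxq)
       *             return rte_flow_error_set(error, rte_errno,
       *                                       RTE_FLOW_ERROR_TYPE_ACTION,
       *                                       NULL, "cannot create fate queue");
       */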
11030
11031 /**
11032  * Release sample sub action resource.
11033  *
11034  * @param[in, out] dev
11035  *   Pointer to rte_eth_dev structure.
11036  * @param[in] act_res
11037  *   Pointer to sample sub action resource.
11038  */
11039 static void
11040 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
11041                                    struct mlx5_flow_sub_actions_idx *act_res)
11042 {
11043         if (act_res->rix_hrxq) {
11044                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
11045                 act_res->rix_hrxq = 0;
11046         }
11047         if (act_res->rix_encap_decap) {
11048                 flow_dv_encap_decap_resource_release(dev,
11049                                                      act_res->rix_encap_decap);
11050                 act_res->rix_encap_decap = 0;
11051         }
11052         if (act_res->rix_port_id_action) {
11053                 flow_dv_port_id_action_resource_release(dev,
11054                                                 act_res->rix_port_id_action);
11055                 act_res->rix_port_id_action = 0;
11056         }
11057         if (act_res->rix_tag) {
11058                 flow_dv_tag_release(dev, act_res->rix_tag);
11059                 act_res->rix_tag = 0;
11060         }
11061         if (act_res->rix_jump) {
11062                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
11063                 act_res->rix_jump = 0;
11064         }
11065 }
11066
11067 int
11068 flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
11069                         struct mlx5_list_entry *entry, void *cb_ctx)
11070 {
11071         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11072         struct rte_eth_dev *dev = ctx->dev;
11073         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11074         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
11075                                                               typeof(*resource),
11076                                                               entry);
11077
11078         if (ctx_resource->ratio == resource->ratio &&
11079             ctx_resource->ft_type == resource->ft_type &&
11080             ctx_resource->ft_id == resource->ft_id &&
11081             ctx_resource->set_action == resource->set_action &&
11082             !memcmp((void *)&ctx_resource->sample_act,
11083                     (void *)&resource->sample_act,
11084                     sizeof(struct mlx5_flow_sub_actions_list))) {
11085                 /*
11086                  * An existing sample action is reused: release the
11087                  * references taken on the prepared sub-actions.
11088                  */
11089                 flow_dv_sample_sub_actions_release(dev,
11090                                                    &ctx_resource->sample_idx);
11091                 return 0;
11092         }
11093         return 1;
11094 }
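
      /*
       * Note: as with the other mlx5_list match callbacks in this file, the
       * return convention is memcmp-like rather than boolean: 0 means the
       * entry matches the lookup context (reuse it and drop the prepared
       * references), non-zero means no match and the search continues.
       */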
11095
11096 struct mlx5_list_entry *
11097 flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11098 {
11099         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11100         struct rte_eth_dev *dev = ctx->dev;
11101         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11102         void **sample_dv_actions = ctx_resource->sub_actions;
11103         struct mlx5_flow_dv_sample_resource *resource;
11104         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
11105         struct mlx5_priv *priv = dev->data->dev_private;
11106         struct mlx5_dev_ctx_shared *sh = priv->sh;
11107         struct mlx5_flow_tbl_resource *tbl;
11108         uint32_t idx = 0;
11109         const uint32_t next_ft_step = 1;
11110         uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
11111         uint8_t is_egress = 0;
11112         uint8_t is_transfer = 0;
11113         struct rte_flow_error *error = ctx->error;
11114
11115         /* Register new sample resource. */
11116         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11117         if (!resource) {
11118                 rte_flow_error_set(error, ENOMEM,
11119                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11120                                           NULL,
11121                                           "cannot allocate resource memory");
11122                 return NULL;
11123         }
11124         *resource = *ctx_resource;
11125         /* Create normal path table level */
11126         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11127                 is_transfer = 1;
11128         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
11129                 is_egress = 1;
11130         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
11131                                         is_egress, is_transfer,
11132                                         true, NULL, 0, 0, 0, error);
11133         if (!tbl) {
11134                 rte_flow_error_set(error, ENOMEM,
11135                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11136                                           NULL,
11137                                           "failed to create normal path table "
11138                                           "for sample");
11139                 goto error;
11140         }
11141         resource->normal_path_tbl = tbl;
11142         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
11143                 if (!sh->default_miss_action) {
11144                         rte_flow_error_set(error, ENOMEM,
11145                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11146                                                 NULL,
11147                                                 "default miss action was not "
11148                                                 "created");
11149                         goto error;
11150                 }
11151                 sample_dv_actions[ctx_resource->sample_act.actions_num++] =
11152                                                 sh->default_miss_action;
11153         }
11154         /* Create a DR sample action */
11155         sampler_attr.sample_ratio = resource->ratio;
11156         sampler_attr.default_next_table = tbl->obj;
11157         sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
11158         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
11159                                                         &sample_dv_actions[0];
11160         sampler_attr.action = resource->set_action;
11161         if (mlx5_os_flow_dr_create_flow_action_sampler
11162                         (&sampler_attr, &resource->verbs_action)) {
11163                 rte_flow_error_set(error, ENOMEM,
11164                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11165                                         NULL, "cannot create sample action");
11166                 goto error;
11167         }
11168         resource->idx = idx;
11169         resource->dev = dev;
11170         return &resource->entry;
11171 error:
11172         if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
11173                 flow_dv_sample_sub_actions_release(dev,
11174                                                    &resource->sample_idx);
11175         if (resource->normal_path_tbl)
11176                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11177                                 resource->normal_path_tbl);
11178         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
11179         return NULL;
11181 }
11182
11183 struct mlx5_list_entry *
11184 flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
11185                          struct mlx5_list_entry *entry __rte_unused,
11186                          void *cb_ctx)
11187 {
11188         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11189         struct rte_eth_dev *dev = ctx->dev;
11190         struct mlx5_flow_dv_sample_resource *resource;
11191         struct mlx5_priv *priv = dev->data->dev_private;
11192         struct mlx5_dev_ctx_shared *sh = priv->sh;
11193         uint32_t idx = 0;
11194
11195         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11196         if (!resource) {
11197                 rte_flow_error_set(ctx->error, ENOMEM,
11198                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11199                                           NULL,
11200                                           "cannot allocate resource memory");
11201                 return NULL;
11202         }
11203         memcpy(resource, entry, sizeof(*resource));
11204         resource->idx = idx;
11205         resource->dev = dev;
11206         return &resource->entry;
11207 }
11208
11209 void
11210 flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
11211                              struct mlx5_list_entry *entry)
11212 {
11213         struct mlx5_flow_dv_sample_resource *resource =
11214                                   container_of(entry, typeof(*resource), entry);
11215         struct rte_eth_dev *dev = resource->dev;
11216         struct mlx5_priv *priv = dev->data->dev_private;
11217
11218         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
11219 }
11220
11221 /**
11222  * Find existing sample resource or create and register a new one.
11223  *
11224  * @param[in, out] dev
11225  *   Pointer to rte_eth_dev structure.
11226  * @param[in] ref
11227  *   Pointer to sample resource reference.
11228  * @param[in, out] dev_flow
11229  *   Pointer to the dev_flow.
11230  * @param[out] error
11231  *   Pointer to error structure.
11232  *
11233  * @return
11234  *   0 on success, a negative errno value otherwise and rte_errno is set.
11235  */
11236 static int
11237 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
11238                          struct mlx5_flow_dv_sample_resource *ref,
11239                          struct mlx5_flow *dev_flow,
11240                          struct rte_flow_error *error)
11241 {
11242         struct mlx5_flow_dv_sample_resource *resource;
11243         struct mlx5_list_entry *entry;
11244         struct mlx5_priv *priv = dev->data->dev_private;
11245         struct mlx5_flow_cb_ctx ctx = {
11246                 .dev = dev,
11247                 .error = error,
11248                 .data = ref,
11249         };
11250
11251         entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
11252         if (!entry)
11253                 return -rte_errno;
11254         resource = container_of(entry, typeof(*resource), entry);
11255         dev_flow->handle->dvh.rix_sample = resource->idx;
11256         dev_flow->dv.sample_res = resource;
11257         return 0;
11258 }
11259
11260 int
11261 flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
11262                             struct mlx5_list_entry *entry, void *cb_ctx)
11263 {
11264         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11265         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11266         struct rte_eth_dev *dev = ctx->dev;
11267         struct mlx5_flow_dv_dest_array_resource *resource =
11268                                   container_of(entry, typeof(*resource), entry);
11269         uint32_t idx = 0;
11270
11271         if (ctx_resource->num_of_dest == resource->num_of_dest &&
11272             ctx_resource->ft_type == resource->ft_type &&
11273             !memcmp((void *)resource->sample_act,
11274                     (void *)ctx_resource->sample_act,
11275                    (ctx_resource->num_of_dest *
11276                    sizeof(struct mlx5_flow_sub_actions_list)))) {
11277                 /*
11278                  * An existing destination array is reused: release the
11279                  * references taken on the prepared sub-actions.
11280                  */
11281                 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11282                         flow_dv_sample_sub_actions_release(dev,
11283                                         &ctx_resource->sample_idx[idx]);
11284                 return 0;
11285         }
11286         return 1;
11287 }
11288
11289 struct mlx5_list_entry *
11290 flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11291 {
11292         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11293         struct rte_eth_dev *dev = ctx->dev;
11294         struct mlx5_flow_dv_dest_array_resource *resource;
11295         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11296         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
11297         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
11298         struct mlx5_priv *priv = dev->data->dev_private;
11299         struct mlx5_dev_ctx_shared *sh = priv->sh;
11300         struct mlx5_flow_sub_actions_list *sample_act;
11301         struct mlx5dv_dr_domain *domain;
11302         uint32_t idx = 0, res_idx = 0;
11303         struct rte_flow_error *error = ctx->error;
11304         uint64_t action_flags;
11305         int ret;
11306
11307         /* Register new destination array resource. */
11308         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11309                                             &res_idx);
11310         if (!resource) {
11311                 rte_flow_error_set(error, ENOMEM,
11312                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11313                                           NULL,
11314                                           "cannot allocate resource memory");
11315                 return NULL;
11316         }
11317         *resource = *ctx_resource;
11318         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11319                 domain = sh->fdb_domain;
11320         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
11321                 domain = sh->rx_domain;
11322         else
11323                 domain = sh->tx_domain;
11324         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11325                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
11326                                  mlx5_malloc(MLX5_MEM_ZERO,
11327                                  sizeof(struct mlx5dv_dr_action_dest_attr),
11328                                  0, SOCKET_ID_ANY);
11329                 if (!dest_attr[idx]) {
11330                         rte_flow_error_set(error, ENOMEM,
11331                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11332                                            NULL,
11333                                            "cannot allocate resource memory");
11334                         goto error;
11335                 }
11336                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
11337                 sample_act = &ctx_resource->sample_act[idx];
11338                 action_flags = sample_act->action_flags;
11339                 switch (action_flags) {
11340                 case MLX5_FLOW_ACTION_QUEUE:
11341                         dest_attr[idx]->dest = sample_act->dr_queue_action;
11342                         break;
11343                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
11344                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
11345                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
11346                         dest_attr[idx]->dest_reformat->reformat =
11347                                         sample_act->dr_encap_action;
11348                         dest_attr[idx]->dest_reformat->dest =
11349                                         sample_act->dr_port_id_action;
11350                         break;
11351                 case MLX5_FLOW_ACTION_PORT_ID:
11352                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
11353                         break;
11354                 case MLX5_FLOW_ACTION_JUMP:
11355                         dest_attr[idx]->dest = sample_act->dr_jump_action;
11356                         break;
11357                 default:
11358                         rte_flow_error_set(error, EINVAL,
11359                                            RTE_FLOW_ERROR_TYPE_ACTION,
11360                                            NULL,
11361                                            "unsupported action type");
11362                         goto error;
11363                 }
11364         }
11365         /* Create a dest array action. */
11366         ret = mlx5_os_flow_dr_create_flow_action_dest_array
11367                                                 (domain,
11368                                                  resource->num_of_dest,
11369                                                  dest_attr,
11370                                                  &resource->action);
11371         if (ret) {
11372                 rte_flow_error_set(error, ENOMEM,
11373                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11374                                    NULL,
11375                                    "cannot create destination array action");
11376                 goto error;
11377         }
11378         resource->idx = res_idx;
11379         resource->dev = dev;
11380         for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11381                 mlx5_free(dest_attr[idx]);
11382         return &resource->entry;
11383 error:
11384         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11385                 flow_dv_sample_sub_actions_release(dev,
11386                                                    &resource->sample_idx[idx]);
11387                 if (dest_attr[idx])
11388                         mlx5_free(dest_attr[idx]);
11389         }
11390         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
11391         return NULL;
11392 }
11393
11394 struct mlx5_list_entry *
11395 flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
11396                             struct mlx5_list_entry *entry __rte_unused,
11397                             void *cb_ctx)
11398 {
11399         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11400         struct rte_eth_dev *dev = ctx->dev;
11401         struct mlx5_flow_dv_dest_array_resource *resource;
11402         struct mlx5_priv *priv = dev->data->dev_private;
11403         struct mlx5_dev_ctx_shared *sh = priv->sh;
11404         uint32_t res_idx = 0;
11405         struct rte_flow_error *error = ctx->error;
11406
11407         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11408                                       &res_idx);
11409         if (!resource) {
11410                 rte_flow_error_set(error, ENOMEM,
11411                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11412                                           NULL,
11413                                           "cannot allocate dest-array memory");
11414                 return NULL;
11415         }
11416         memcpy(resource, entry, sizeof(*resource));
11417         resource->idx = res_idx;
11418         resource->dev = dev;
11419         return &resource->entry;
11420 }
11421
11422 void
11423 flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
11424                                  struct mlx5_list_entry *entry)
11425 {
11426         struct mlx5_flow_dv_dest_array_resource *resource =
11427                         container_of(entry, typeof(*resource), entry);
11428         struct rte_eth_dev *dev = resource->dev;
11429         struct mlx5_priv *priv = dev->data->dev_private;
11430
11431         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11432 }
11433
11434 /**
11435  * Find existing destination array resource or create and register a new one.
11436  *
11437  * @param[in, out] dev
11438  *   Pointer to rte_eth_dev structure.
11439  * @param[in] ref
11440  *   Pointer to destination array resource reference.
11441  * @param[in, out] dev_flow
11442  *   Pointer to the dev_flow.
11443  * @param[out] error
11444  *   Pointer to error structure.
11445  *
11446  * @return
11447  *   0 on success, a negative errno value otherwise and rte_errno is set.
11448  */
11449 static int
11450 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11451                          struct mlx5_flow_dv_dest_array_resource *ref,
11452                          struct mlx5_flow *dev_flow,
11453                          struct rte_flow_error *error)
11454 {
11455         struct mlx5_flow_dv_dest_array_resource *resource;
11456         struct mlx5_priv *priv = dev->data->dev_private;
11457         struct mlx5_list_entry *entry;
11458         struct mlx5_flow_cb_ctx ctx = {
11459                 .dev = dev,
11460                 .error = error,
11461                 .data = ref,
11462         };
11463
11464         entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
11465         if (!entry)
11466                 return -rte_errno;
11467         resource = container_of(entry, typeof(*resource), entry);
11468         dev_flow->handle->dvh.rix_dest_array = resource->idx;
11469         dev_flow->dv.dest_array_res = resource;
11470         return 0;
11471 }
11472
11473 /**
11474  * Convert Sample action to DV specification.
11475  *
11476  * @param[in] dev
11477  *   Pointer to rte_eth_dev structure.
11478  * @param[in] action
11479  *   Pointer to sample action structure.
11480  * @param[in, out] dev_flow
11481  *   Pointer to the mlx5_flow.
11482  * @param[in] attr
11483  *   Pointer to the flow attributes.
11484  * @param[in, out] num_of_dest
11485  *   Pointer to the number of destinations.
11486  * @param[in, out] sample_actions
11487  *   Pointer to sample actions list.
11488  * @param[in, out] res
11489  *   Pointer to sample resource.
11490  * @param[out] error
11491  *   Pointer to the error structure.
11492  *
11493  * @return
11494  *   0 on success, a negative errno value otherwise and rte_errno is set.
11495  */
11496 static int
11497 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
11498                                 const struct rte_flow_action_sample *action,
11499                                 struct mlx5_flow *dev_flow,
11500                                 const struct rte_flow_attr *attr,
11501                                 uint32_t *num_of_dest,
11502                                 void **sample_actions,
11503                                 struct mlx5_flow_dv_sample_resource *res,
11504                                 struct rte_flow_error *error)
11505 {
11506         struct mlx5_priv *priv = dev->data->dev_private;
11507         const struct rte_flow_action *sub_actions;
11508         struct mlx5_flow_sub_actions_list *sample_act;
11509         struct mlx5_flow_sub_actions_idx *sample_idx;
11510         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11511         struct rte_flow *flow = dev_flow->flow;
11512         struct mlx5_flow_rss_desc *rss_desc;
11513         uint64_t action_flags = 0;
11514
11515         MLX5_ASSERT(wks);
11516         rss_desc = &wks->rss_desc;
11517         sample_act = &res->sample_act;
11518         sample_idx = &res->sample_idx;
11519         res->ratio = action->ratio;
11520         sub_actions = action->actions;
11521         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
11522                 int type = sub_actions->type;
11523                 uint32_t pre_rix = 0;
11524                 void *pre_r;
11525                 switch (type) {
11526                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11527                 {
11528                         const struct rte_flow_action_queue *queue;
11529                         struct mlx5_hrxq *hrxq;
11530                         uint32_t hrxq_idx;
11531
11532                         queue = sub_actions->conf;
11533                         rss_desc->queue_num = 1;
11534                         rss_desc->queue[0] = queue->index;
11535                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11536                                                     rss_desc, &hrxq_idx);
11537                         if (!hrxq)
11538                                 return rte_flow_error_set
11539                                         (error, rte_errno,
11540                                          RTE_FLOW_ERROR_TYPE_ACTION,
11541                                          NULL,
11542                                          "cannot create fate queue");
11543                         sample_act->dr_queue_action = hrxq->action;
11544                         sample_idx->rix_hrxq = hrxq_idx;
11545                         sample_actions[sample_act->actions_num++] =
11546                                                 hrxq->action;
11547                         (*num_of_dest)++;
11548                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11549                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11550                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11551                         dev_flow->handle->fate_action =
11552                                         MLX5_FLOW_FATE_QUEUE;
11553                         break;
11554                 }
11555                 case RTE_FLOW_ACTION_TYPE_RSS:
11556                 {
11557                         struct mlx5_hrxq *hrxq;
11558                         uint32_t hrxq_idx;
11559                         const struct rte_flow_action_rss *rss;
11560                         const uint8_t *rss_key;
11561
11562                         rss = sub_actions->conf;
11563                         memcpy(rss_desc->queue, rss->queue,
11564                                rss->queue_num * sizeof(uint16_t));
11565                         rss_desc->queue_num = rss->queue_num;
11566                         /* NULL RSS key indicates default RSS key. */
11567                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11568                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11569                         /*
11570                          * rss->level and rss->types should be set in advance
11571                          * when expanding items for RSS.
11572                          */
11573                         flow_dv_hashfields_set(dev_flow, rss_desc);
11574                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11575                                                     rss_desc, &hrxq_idx);
11576                         if (!hrxq)
11577                                 return rte_flow_error_set
11578                                         (error, rte_errno,
11579                                          RTE_FLOW_ERROR_TYPE_ACTION,
11580                                          NULL,
11581                                          "cannot create fate queue");
11582                         sample_act->dr_queue_action = hrxq->action;
11583                         sample_idx->rix_hrxq = hrxq_idx;
11584                         sample_actions[sample_act->actions_num++] =
11585                                                 hrxq->action;
11586                         (*num_of_dest)++;
11587                         action_flags |= MLX5_FLOW_ACTION_RSS;
11588                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11589                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11590                         dev_flow->handle->fate_action =
11591                                         MLX5_FLOW_FATE_QUEUE;
11592                         break;
11593                 }
11594                 case RTE_FLOW_ACTION_TYPE_MARK:
11595                 {
11596                         uint32_t tag_be = mlx5_flow_mark_set
11597                                 (((const struct rte_flow_action_mark *)
11598                                 (sub_actions->conf))->id);
11599
11600                         dev_flow->handle->mark = 1;
11601                         pre_rix = dev_flow->handle->dvh.rix_tag;
11602                         /* Save the mark resource before sample */
11603                         pre_r = dev_flow->dv.tag_resource;
11604                         if (flow_dv_tag_resource_register(dev, tag_be,
11605                                                   dev_flow, error))
11606                                 return -rte_errno;
11607                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11608                         sample_act->dr_tag_action =
11609                                 dev_flow->dv.tag_resource->action;
11610                         sample_idx->rix_tag =
11611                                 dev_flow->handle->dvh.rix_tag;
11612                         sample_actions[sample_act->actions_num++] =
11613                                                 sample_act->dr_tag_action;
11614                         /* Recover the mark resource after sample */
11615                         dev_flow->dv.tag_resource = pre_r;
11616                         dev_flow->handle->dvh.rix_tag = pre_rix;
11617                         action_flags |= MLX5_FLOW_ACTION_MARK;
11618                         break;
11619                 }
11620                 case RTE_FLOW_ACTION_TYPE_COUNT:
11621                 {
11622                         if (!flow->counter) {
11623                                 flow->counter =
11624                                         flow_dv_translate_create_counter(dev,
11625                                                 dev_flow, sub_actions->conf,
11626                                                 0);
11627                                 if (!flow->counter)
11628                                         return rte_flow_error_set
11629                                                 (error, rte_errno,
11630                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11631                                                 NULL,
11632                                                 "cannot create counter"
11633                                                 " object.");
11634                         }
11635                         sample_act->dr_cnt_action =
11636                                   (flow_dv_counter_get_by_idx(dev,
11637                                   flow->counter, NULL))->action;
11638                         sample_actions[sample_act->actions_num++] =
11639                                                 sample_act->dr_cnt_action;
11640                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11641                         break;
11642                 }
11643                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11644                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
11645                 {
11646                         struct mlx5_flow_dv_port_id_action_resource
11647                                         port_id_resource;
11648                         uint32_t port_id = 0;
11649
11650                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11651                         /* Save the port id resource before sample */
11652                         pre_rix = dev_flow->handle->rix_port_id_action;
11653                         pre_r = dev_flow->dv.port_id_action;
11654                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11655                                                              &port_id, error))
11656                                 return -rte_errno;
11657                         port_id_resource.port_id = port_id;
11658                         if (flow_dv_port_id_action_resource_register
11659                             (dev, &port_id_resource, dev_flow, error))
11660                                 return -rte_errno;
11661                         sample_act->dr_port_id_action =
11662                                 dev_flow->dv.port_id_action->action;
11663                         sample_idx->rix_port_id_action =
11664                                 dev_flow->handle->rix_port_id_action;
11665                         sample_actions[sample_act->actions_num++] =
11666                                                 sample_act->dr_port_id_action;
11667                         /* Recover the port id resource after sample */
11668                         dev_flow->dv.port_id_action = pre_r;
11669                         dev_flow->handle->rix_port_id_action = pre_rix;
11670                         (*num_of_dest)++;
11671                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11672                         break;
11673                 }
11674                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11675                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11676                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11677                         /* Save the encap resource before sample */
11678                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11679                         pre_r = dev_flow->dv.encap_decap;
11680                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11681                                                            dev_flow,
11682                                                            attr->transfer,
11683                                                            error))
11684                                 return -rte_errno;
11685                         sample_act->dr_encap_action =
11686                                 dev_flow->dv.encap_decap->action;
11687                         sample_idx->rix_encap_decap =
11688                                 dev_flow->handle->dvh.rix_encap_decap;
11689                         sample_actions[sample_act->actions_num++] =
11690                                                 sample_act->dr_encap_action;
11691                         /* Recover the encap resource after sample */
11692                         dev_flow->dv.encap_decap = pre_r;
11693                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11694                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11695                         break;
11696                 default:
11697                         return rte_flow_error_set(error, EINVAL,
11698                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11699                                 NULL,
11700                                 "unsupported action for sampler");
11701                 }
11702         }
11703         sample_act->action_flags = action_flags;
11704         res->ft_id = dev_flow->dv.group;
11705         if (attr->transfer) {
11706                 union {
11707                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11708                         uint64_t set_action;
11709                 } action_ctx = { .set_action = 0 };
11710
11711                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11712                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11713                          MLX5_MODIFICATION_TYPE_SET);
11714                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11715                          MLX5_MODI_META_REG_C_0);
11716                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11717                          priv->vport_meta_tag);
11718                 res->set_action = action_ctx.set_action;
11719         } else if (attr->ingress) {
11720                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11721         } else {
11722                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11723         }
11724         return 0;
11725 }
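
      /*
       * Illustrative sketch (not part of the driver): the application-level
       * action list that reaches the translation above. A ratio of 1 with a
       * PORT_ID sub-action is the classic full-mirroring setup on the FDB
       * (transfer attribute set); the port ids are hypothetical.
       *
       *     struct rte_flow_action_port_id mirror = { .id = 1 };
       *     struct rte_flow_action_port_id wire = { .id = 0 };
       *     struct rte_flow_action sub_acts[] = {
       *             { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &mirror },
       *             { .type = RTE_FLOW_ACTION_TYPE_END },
       *     };
       *     struct rte_flow_action_sample sample = {
       *             .ratio = 1,             // mirror every packet
       *             .actions = sub_acts,
       *     };
       *     struct rte_flow_action acts[] = {
       *             { .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample },
       *             { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &wire },
       *             { .type = RTE_FLOW_ACTION_TYPE_END },
       *     };
       */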
11726
11727 /**
11728  * Create the sample action resource, or the destination array for mirroring.
11729  *
11730  * @param[in] dev
11731  *   Pointer to rte_eth_dev structure.
11732  * @param[in, out] dev_flow
11733  *   Pointer to the mlx5_flow.
11734  * @param[in] num_of_dest
11735  *   The number of destinations.
11736  * @param[in, out] res
11737  *   Pointer to sample resource.
11738  * @param[in, out] mdest_res
11739  *   Pointer to destination array resource.
11740  * @param[in] sample_actions
11741  *   Pointer to sample path actions list.
11742  * @param[in] action_flags
11743  *   Holds the actions detected until now.
11744  * @param[out] error
11745  *   Pointer to the error structure.
11746  *
11747  * @return
11748  *   0 on success, a negative errno value otherwise and rte_errno is set.
11749  */
11750 static int
11751 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11752                              struct mlx5_flow *dev_flow,
11753                              uint32_t num_of_dest,
11754                              struct mlx5_flow_dv_sample_resource *res,
11755                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
11756                              void **sample_actions,
11757                              uint64_t action_flags,
11758                              struct rte_flow_error *error)
11759 {
11760         /* Update normal path action resource into the last index of array. */
11761         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11762         struct mlx5_flow_sub_actions_list *sample_act =
11763                                         &mdest_res->sample_act[dest_index];
11764         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11765         struct mlx5_flow_rss_desc *rss_desc;
11766         uint32_t normal_idx = 0;
11767         struct mlx5_hrxq *hrxq;
11768         uint32_t hrxq_idx;
11769
11770         MLX5_ASSERT(wks);
11771         rss_desc = &wks->rss_desc;
11772         if (num_of_dest > 1) {
11773                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
11774                         /* Handle QP action for mirroring */
11775                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11776                                                     rss_desc, &hrxq_idx);
11777                         if (!hrxq)
11778                                 return rte_flow_error_set
11779                                      (error, rte_errno,
11780                                       RTE_FLOW_ERROR_TYPE_ACTION,
11781                                       NULL,
11782                                       "cannot create rx queue");
11783                         normal_idx++;
11784                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
11785                         sample_act->dr_queue_action = hrxq->action;
11786                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11787                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11788                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11789                 }
11790                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
11791                         normal_idx++;
11792                         mdest_res->sample_idx[dest_index].rix_encap_decap =
11793                                 dev_flow->handle->dvh.rix_encap_decap;
11794                         sample_act->dr_encap_action =
11795                                 dev_flow->dv.encap_decap->action;
11796                         dev_flow->handle->dvh.rix_encap_decap = 0;
11797                 }
11798                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
11799                         normal_idx++;
11800                         mdest_res->sample_idx[dest_index].rix_port_id_action =
11801                                 dev_flow->handle->rix_port_id_action;
11802                         sample_act->dr_port_id_action =
11803                                 dev_flow->dv.port_id_action->action;
11804                         dev_flow->handle->rix_port_id_action = 0;
11805                 }
11806                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
11807                         normal_idx++;
11808                         mdest_res->sample_idx[dest_index].rix_jump =
11809                                 dev_flow->handle->rix_jump;
11810                         sample_act->dr_jump_action =
11811                                 dev_flow->dv.jump->action;
11812                         dev_flow->handle->rix_jump = 0;
11813                 }
11814                 sample_act->actions_num = normal_idx;
11815                 /* Update the sample action resource into the first index of the array. */
11816                 mdest_res->ft_type = res->ft_type;
11817                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
11818                                 sizeof(struct mlx5_flow_sub_actions_idx));
11819                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
11820                                 sizeof(struct mlx5_flow_sub_actions_list));
11821                 mdest_res->num_of_dest = num_of_dest;
11822                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
11823                                                          dev_flow, error))
11824                         return rte_flow_error_set(error, EINVAL,
11825                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11826                                                   NULL, "can't create sample "
11827                                                   "action");
11828         } else {
11829                 res->sub_actions = sample_actions;
11830                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
11831                         return rte_flow_error_set(error, EINVAL,
11832                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11833                                                   NULL,
11834                                                   "can't create sample action");
11835         }
11836         return 0;
11837 }
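
/*
 * Illustrative sketch (not part of the driver): an application-side flow
 * that exercises the num_of_dest > 1 branch above. A sample action with
 * ratio 1 mirrors every packet to one destination while the original
 * packet continues on the normal path. Queue numbers are hypothetical.
 */
#if 0 /* example only */
static int
mirror_flow_example(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* The mirrored copy goes to queue 1. */
	struct rte_flow_action_queue mirror_q = { .index = 1 };
	struct rte_flow_action sample_acts[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &mirror_q },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_action_sample sample = {
		.ratio = 1, /* ratio 1 == mirror every packet */
		.actions = sample_acts,
	};
	/* The original packet continues to queue 0 (the "normal path"). */
	struct rte_flow_action_queue normal_q = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &normal_q },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	return rte_flow_create(port_id, &attr, pattern, actions, err) ?
	       0 : -rte_errno;
}
#endif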
11838
11839 /**
11840  * Remove an ASO age action from age actions list.
11841  *
11842  * @param[in] dev
11843  *   Pointer to the Ethernet device structure.
11844  * @param[in] age
11845  *   Pointer to the ASO age action handle.
11846  */
11847 static void
11848 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11849                                 struct mlx5_aso_age_action *age)
11850 {
11851         struct mlx5_age_info *age_info;
11852         struct mlx5_age_param *age_param = &age->age_params;
11853         struct mlx5_priv *priv = dev->data->dev_private;
11854         uint16_t expected = AGE_CANDIDATE;
11855
11856         age_info = GET_PORT_AGE_INFO(priv);
11857         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11858                                          AGE_FREE, false, __ATOMIC_RELAXED,
11859                                          __ATOMIC_RELAXED)) {
11860                 /*
11861                  * The lock is needed even on age timeout,
11862                  * since the age action may still be in process.
11863                  */
11864                 rte_spinlock_lock(&age_info->aged_sl);
11865                 LIST_REMOVE(age, next);
11866                 rte_spinlock_unlock(&age_info->aged_sl);
11867                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11868         }
11869 }
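
/*
 * Illustrative state walk (derived from the code above): an action still in
 * AGE_CANDIDATE flips straight to AGE_FREE via the compare-exchange; one that
 * already aged out sits on the aged list, so it is unlinked under the
 * spinlock before its state is stored as AGE_FREE.
 */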
11870
11871 /**
11872  * Release an ASO age action.
11873  *
11874  * @param[in] dev
11875  *   Pointer to the Ethernet device structure.
11876  * @param[in] age_idx
11877  *   Index of ASO age action to release.
11881  *
11882  * @return
11883  *   0 when age action was removed, otherwise the number of references.
11884  */
11885 static int
11886 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11887 {
11888         struct mlx5_priv *priv = dev->data->dev_private;
11889         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11890         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11891         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11892
11893         if (!ret) {
11894                 flow_dv_aso_age_remove_from_age(dev, age);
11895                 rte_spinlock_lock(&mng->free_sl);
11896                 LIST_INSERT_HEAD(&mng->free, age, next);
11897                 rte_spinlock_unlock(&mng->free_sl);
11898         }
11899         return ret;
11900 }
11901
11902 /**
11903  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11904  *
11905  * @param[in] dev
11906  *   Pointer to the Ethernet device structure.
11907  *
11908  * @return
11909  *   0 on success, otherwise negative errno value and rte_errno is set.
11910  */
11911 static int
11912 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11913 {
11914         struct mlx5_priv *priv = dev->data->dev_private;
11915         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11916         void *old_pools = mng->pools;
11917         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11918         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11919         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11920
11921         if (!pools) {
11922                 rte_errno = ENOMEM;
11923                 return -ENOMEM;
11924         }
11925         if (old_pools) {
11926                 memcpy(pools, old_pools,
11927                        mng->n * sizeof(struct mlx5_aso_age_pool *));
11928                 mlx5_free(old_pools);
11929         } else {
11930                 /* First ASO flow hit allocation - starting ASO data-path. */
11931                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11932
11933                 if (ret) {
11934                         mlx5_free(pools);
11935                         return ret;
11936                 }
11937         }
11938         mng->n = resize;
11939         mng->pools = pools;
11940         return 0;
11941 }
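
/*
 * Illustrative growth pattern (assuming MLX5_CNT_CONTAINER_RESIZE == 64):
 * mng->n steps 0 -> 64 -> 128 -> ..., each step reallocating only the array
 * of pool pointers. The very first resize also starts the ASO flow-hit
 * polling data-path, since no pool existed before it.
 */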
11942
11943 /**
11944  * Create and initialize a new ASO aging pool.
11945  *
11946  * @param[in] dev
11947  *   Pointer to the Ethernet device structure.
11948  * @param[out] age_free
11949  *   Where to put the pointer of a new age action.
11950  *
11951  * @return
11952  *   The age actions pool pointer and @p age_free is set on success,
11953  *   NULL otherwise and rte_errno is set.
11954  */
11955 static struct mlx5_aso_age_pool *
11956 flow_dv_age_pool_create(struct rte_eth_dev *dev,
11957                         struct mlx5_aso_age_action **age_free)
11958 {
11959         struct mlx5_priv *priv = dev->data->dev_private;
11960         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11961         struct mlx5_aso_age_pool *pool = NULL;
11962         struct mlx5_devx_obj *obj = NULL;
11963         uint32_t i;
11964
11965         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
11966                                                     priv->sh->cdev->pdn);
11967         if (!obj) {
11968                 rte_errno = ENODATA;
11969                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
11970                 return NULL;
11971         }
11972         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11973         if (!pool) {
11974                 claim_zero(mlx5_devx_cmd_destroy(obj));
11975                 rte_errno = ENOMEM;
11976                 return NULL;
11977         }
11978         pool->flow_hit_aso_obj = obj;
11979         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
11980         rte_rwlock_write_lock(&mng->resize_rwl);
11981         pool->index = mng->next;
11982         /* Resize pools array if there is no room for the new pool in it. */
11983         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11984                 claim_zero(mlx5_devx_cmd_destroy(obj));
11985                 mlx5_free(pool);
11986                 rte_rwlock_write_unlock(&mng->resize_rwl);
11987                 return NULL;
11988         }
11989         mng->pools[pool->index] = pool;
11990         mng->next++;
11991         rte_rwlock_write_unlock(&mng->resize_rwl);
11992         /* Assign the first action in the new pool, the rest go to free list. */
11993         *age_free = &pool->actions[0];
11994         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11995                 pool->actions[i].offset = i;
11996                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11997         }
11998         return pool;
11999 }
12000
12001 /**
12002  * Allocate an ASO aging bit.
12003  *
12004  * @param[in] dev
12005  *   Pointer to the Ethernet device structure.
12006  * @param[out] error
12007  *   Pointer to the error structure.
12008  *
12009  * @return
12010  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
12011  */
12012 static uint32_t
12013 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12014 {
12015         struct mlx5_priv *priv = dev->data->dev_private;
12016         const struct mlx5_aso_age_pool *pool;
12017         struct mlx5_aso_age_action *age_free = NULL;
12018         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12019
12020         MLX5_ASSERT(mng);
12021         /* Try to get the next free age action bit. */
12022         rte_spinlock_lock(&mng->free_sl);
12023         age_free = LIST_FIRST(&mng->free);
12024         if (age_free) {
12025                 LIST_REMOVE(age_free, next);
12026         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
12027                 rte_spinlock_unlock(&mng->free_sl);
12028                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12029                                    NULL, "failed to create ASO age pool");
12030                 return 0; /* 0 is an error. */
12031         }
12032         rte_spinlock_unlock(&mng->free_sl);
12033         pool = container_of
12034           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
12035                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
12036                                                                        actions);
12037         if (!age_free->dr_action) {
12038                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
12039                                                  error);
12040
12041                 if (reg_c < 0) {
12042                         rte_flow_error_set(error, rte_errno,
12043                                            RTE_FLOW_ERROR_TYPE_ACTION,
12044                                            NULL, "failed to get reg_c "
12045                                            "for ASO flow hit");
12046                         return 0; /* 0 is an error. */
12047                 }
12048 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
12049                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
12050                                 (priv->sh->rx_domain,
12051                                  pool->flow_hit_aso_obj->obj, age_free->offset,
12052                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
12053                                  (reg_c - REG_C_0));
12054 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
12055                 if (!age_free->dr_action) {
12056                         rte_errno = errno;
12057                         rte_spinlock_lock(&mng->free_sl);
12058                         LIST_INSERT_HEAD(&mng->free, age_free, next);
12059                         rte_spinlock_unlock(&mng->free_sl);
12060                         rte_flow_error_set(error, rte_errno,
12061                                            RTE_FLOW_ERROR_TYPE_ACTION,
12062                                            NULL, "failed to create ASO "
12063                                            "flow hit action");
12064                         return 0; /* 0 is an error. */
12065                 }
12066         }
12067         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
12068         return pool->index | ((age_free->offset + 1) << 16);
12069 }
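
/*
 * Worked example of the returned index encoding above: pool->index == 3 and
 * age_free->offset == 5 give 3 | ((5 + 1) << 16) == 0x00060003. The offset
 * is biased by one so that 0 can stay reserved as the error value.
 */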
12070
12071 /**
12072  * Initialize flow ASO age parameters.
12073  *
12074  * @param[in] dev
12075  *   Pointer to rte_eth_dev structure.
12076  * @param[in] age_idx
12077  *   Index of ASO age action.
12078  * @param[in] context
12079  *   Pointer to flow counter age context.
12080  * @param[in] timeout
12081  *   Aging timeout in seconds.
12082  *
12083  */
12084 static void
12085 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
12086                             uint32_t age_idx,
12087                             void *context,
12088                             uint32_t timeout)
12089 {
12090         struct mlx5_aso_age_action *aso_age;
12091
12092         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
12093         MLX5_ASSERT(aso_age);
12094         aso_age->age_params.context = context;
12095         aso_age->age_params.timeout = timeout;
12096         aso_age->age_params.port_id = dev->data->port_id;
12097         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
12098                          __ATOMIC_RELAXED);
12099         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
12100                          __ATOMIC_RELAXED);
12101 }
12102
12103 static void
12104 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
12105                                const struct rte_flow_item_integrity *value,
12106                                void *headers_m, void *headers_v)
12107 {
12108         if (mask->l4_ok) {
12109                 /* The RTE l4_ok filter aggregates the hardware l4_ok and
12110                  * l4_checksum_ok filters.
12111                  * A positive RTE l4_ok match requires a hardware match on
12112                  * both L4 integrity bits.
12113                  * For a negative match, check the hardware l4_checksum_ok
12114                  * bit only, because hardware sets that bit to 0 for all
12115                  * packets with a bad L4.
12116                  */
12117                 if (value->l4_ok) {
12118                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1);
12119                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1);
12120                 }
12121                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12122                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12123                          !!value->l4_ok);
12124         }
12125         if (mask->l4_csum_ok) {
12126                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12127                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12128                          value->l4_csum_ok);
12129         }
12130 }
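
/*
 * Illustrative matcher outcome of the aggregation above: a spec with
 * l4_ok == 1 matches only packets with both hardware l4_ok and
 * l4_checksum_ok set, while l4_ok == 0 matches any packet with
 * l4_checksum_ok cleared, which hardware guarantees for every bad L4.
 */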
12131
12132 static void
12133 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
12134                                const struct rte_flow_item_integrity *value,
12135                                void *headers_m, void *headers_v, bool is_ipv4)
12136 {
12137         if (mask->l3_ok) {
12138                 /* For IPv4, the RTE l3_ok filter aggregates the hardware
12139                  * l3_ok and ipv4_csum_ok filters.
12140                  * A positive RTE l3_ok match requires a hardware match on
12141                  * both L3 integrity bits.
12142                  * For a negative match, check the hardware ipv4_csum_ok
12143                  * bit only, because hardware sets that bit to 0 for all
12144                  * packets with a bad L3.
12145                  */
12146                 if (is_ipv4) {
12147                         if (value->l3_ok) {
12148                                 MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12149                                          l3_ok, 1);
12150                                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12151                                          l3_ok, 1);
12152                         }
12153                         MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12154                                  ipv4_checksum_ok, 1);
12155                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12156                                  ipv4_checksum_ok, !!value->l3_ok);
12157                 } else {
12158                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1);
12159                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
12160                                  value->l3_ok);
12161                 }
12162         }
12163         if (mask->ipv4_csum_ok) {
12164                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1);
12165                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
12166                          value->ipv4_csum_ok);
12167         }
12168 }
12169
12170 static void
12171 set_integrity_bits(void *headers_m, void *headers_v,
12172                    const struct rte_flow_item *integrity_item, bool is_l3_ip4)
12173 {
12174         const struct rte_flow_item_integrity *spec = integrity_item->spec;
12175         const struct rte_flow_item_integrity *mask = integrity_item->mask;
12176
12177         /* Integrity validation already ensured a non-NULL spec pointer. */
12178         MLX5_ASSERT(spec != NULL);
12179         if (!mask)
12180                 mask = &rte_flow_item_integrity_mask;
12181         flow_dv_translate_integrity_l3(mask, spec, headers_m, headers_v,
12182                                        is_l3_ip4);
12183         flow_dv_translate_integrity_l4(mask, spec, headers_m, headers_v);
12184 }
12185
12186 static void
12187 flow_dv_translate_item_integrity_post(void *matcher, void *key,
12188                                       const
12189                                       struct rte_flow_item *integrity_items[2],
12190                                       uint64_t pattern_flags)
12191 {
12192         void *headers_m, *headers_v;
12193         bool is_l3_ip4;
12194
12195         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
12196                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12197                                          inner_headers);
12198                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
12199                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) !=
12200                             0;
12201                 set_integrity_bits(headers_m, headers_v,
12202                                    integrity_items[1], is_l3_ip4);
12203         }
12204         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
12205                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12206                                          outer_headers);
12207                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
12208                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) !=
12209                             0;
12210                 set_integrity_bits(headers_m, headers_v,
12211                                    integrity_items[0], is_l3_ip4);
12212         }
12213 }
12214
12215 static void
12216 flow_dv_translate_item_integrity(const struct rte_flow_item *item,
12217                                  const struct rte_flow_item *integrity_items[2],
12218                                  uint64_t *last_item)
12219 {
12220         const struct rte_flow_item_integrity *spec = (typeof(spec))item->spec;
12221
12222         /* Integrity validation already ensured a non-NULL spec pointer. */
12223         MLX5_ASSERT(spec != NULL);
12224         if (spec->level > 1) {
12225                 integrity_items[1] = item;
12226                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
12227         } else {
12228                 integrity_items[0] = item;
12229                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
12230         }
12231 }
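
/*
 * Illustrative sketch (not part of the driver): an integrity item as an
 * application would build it. level <= 1 selects the outer headers, so this
 * spec is routed to integrity_items[0] above.
 */
#if 0 /* example only */
static const struct rte_flow_item_integrity integ_spec = {
	.level = 0,
	.l3_ok = 1,
	.l4_ok = 1,
};
static const struct rte_flow_item_integrity integ_mask = {
	.l3_ok = 1,
	.l4_ok = 1,
};
static const struct rte_flow_item integ_item = {
	.type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
	.spec = &integ_spec,
	.mask = &integ_mask,
};
#endif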
12232
12233 /**
12234  * Prepares a DV flow counter with aging configuration.
12235  * Gets an existing counter by index, or creates a new one if none exists.
12236  *
12237  * @param[in] dev
12238  *   Pointer to rte_eth_dev structure.
12239  * @param[in] dev_flow
12240  *   Pointer to the mlx5_flow.
12241  * @param[in, out] flow
12242  *   Pointer to the sub flow.
12243  * @param[in] count
12244  *   Pointer to the counter action configuration.
12245  * @param[in] age
12246  *   Pointer to the aging action configuration.
12247  * @param[out] error
12248  *   Pointer to the error structure.
12249  *
12250  * @return
12251  *   Pointer to the counter on success, NULL otherwise.
12252  */
12253 static struct mlx5_flow_counter *
12254 flow_dv_prepare_counter(struct rte_eth_dev *dev,
12255                         struct mlx5_flow *dev_flow,
12256                         struct rte_flow *flow,
12257                         const struct rte_flow_action_count *count,
12258                         const struct rte_flow_action_age *age,
12259                         struct rte_flow_error *error)
12260 {
12261         if (!flow->counter) {
12262                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
12263                                                                  count, age);
12264                 if (!flow->counter) {
12265                         rte_flow_error_set(error, rte_errno,
12266                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12267                                            "cannot create counter object.");
12268                         return NULL;
12269                 }
12270         }
12271         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12272 }
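
/*
 * Illustrative sketch (not part of the driver): the COUNT + AGE action
 * combination that this helper serves. Values are hypothetical.
 */
#if 0 /* example only */
static const struct rte_flow_action_count count_conf = { 0 };
static const struct rte_flow_action_age age_conf = {
	.timeout = 30, /* seconds without a hit before the flow is reported aged */
};
static const struct rte_flow_action cnt_age_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif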
12273
12274 /**
12275  * Release an ASO CT action via its owner device.
12276  *
12277  * @param[in] dev
12278  *   Pointer to the Ethernet device structure.
12279  * @param[in] idx
12280  *   Index of ASO CT action to release.
12281  *
12282  * @return
12283  *   0 when CT action was removed, otherwise the number of references.
12284  */
12285 static inline int
12286 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
12287 {
12288         struct mlx5_priv *priv = dev->data->dev_private;
12289         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12290         uint32_t ret;
12291         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12292         enum mlx5_aso_ct_state state =
12293                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
12294
12295         /* Cannot release when CT is in the ASO SQ. */
12296         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
12297                 return -1;
12298         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
12299         if (!ret) {
12300                 if (ct->dr_action_orig) {
12301 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12302                         claim_zero(mlx5_glue->destroy_flow_action
12303                                         (ct->dr_action_orig));
12304 #endif
12305                         ct->dr_action_orig = NULL;
12306                 }
12307                 if (ct->dr_action_rply) {
12308 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12309                         claim_zero(mlx5_glue->destroy_flow_action
12310                                         (ct->dr_action_rply));
12311 #endif
12312                         ct->dr_action_rply = NULL;
12313                 }
12314                 /* Clear the state to free; not needed on the first allocation. */
12315                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
12316                 rte_spinlock_lock(&mng->ct_sl);
12317                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
12318                 rte_spinlock_unlock(&mng->ct_sl);
12319         }
12320         return (int)ret;
12321 }
12322
12323 static inline int
12324 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx,
12325                        struct rte_flow_error *error)
12326 {
12327         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12328         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12329         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12330         int ret;
12331
12332         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12333         if (dev->data->dev_started != 1)
12334                 return rte_flow_error_set(error, EAGAIN,
12335                                           RTE_FLOW_ERROR_TYPE_ACTION,
12336                                           NULL,
12337                                           "Indirect CT action cannot be destroyed when the port is stopped");
12338         ret = flow_dv_aso_ct_dev_release(owndev, idx);
12339         if (ret < 0)
12340                 return rte_flow_error_set(error, EAGAIN,
12341                                           RTE_FLOW_ERROR_TYPE_ACTION,
12342                                           NULL,
12343                                           "Current state prevents indirect CT action from being destroyed");
12344         return ret;
12345 }
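
/*
 * Note (derived from the code above): an indirect CT index carries the owner
 * port in its upper bits, so the action is always released through the owner
 * device, even when the request arrives on a peer port.
 */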
12346
12347 /**
12348  * Resize the ASO CT pools array by 64 pools.
12349  *
12350  * @param[in] dev
12351  *   Pointer to the Ethernet device structure.
12352  *
12353  * @return
12354  *   0 on success, otherwise negative errno value and rte_errno is set.
12355  */
12356 static int
12357 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12358 {
12359         struct mlx5_priv *priv = dev->data->dev_private;
12360         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12361         void *old_pools = mng->pools;
12362         /* Magic number for now; should be replaced with a macro. */
12363         uint32_t resize = mng->n + 64;
12364         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12365         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12366
12367         if (!pools) {
12368                 rte_errno = ENOMEM;
12369                 return -rte_errno;
12370         }
12371         rte_rwlock_write_lock(&mng->resize_rwl);
12372         /* The ASO SQ/QP was already initialized at startup. */
12373         if (old_pools) {
12374                 /* Realloc could be an alternative choice. */
12375                 rte_memcpy(pools, old_pools,
12376                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
12377                 mlx5_free(old_pools);
12378         }
12379         mng->n = resize;
12380         mng->pools = pools;
12381         rte_rwlock_write_unlock(&mng->resize_rwl);
12382         return 0;
12383 }
12384
12385 /**
12386  * Create and initialize a new ASO CT pool.
12387  *
12388  * @param[in] dev
12389  *   Pointer to the Ethernet device structure.
12390  * @param[out] ct_free
12391  *   Where to put the pointer of a new CT action.
12392  *
12393  * @return
12394  *   The CT actions pool pointer and @p ct_free is set on success,
12395  *   NULL otherwise and rte_errno is set.
12396  */
12397 static struct mlx5_aso_ct_pool *
12398 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
12399                        struct mlx5_aso_ct_action **ct_free)
12400 {
12401         struct mlx5_priv *priv = dev->data->dev_private;
12402         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12403         struct mlx5_aso_ct_pool *pool = NULL;
12404         struct mlx5_devx_obj *obj = NULL;
12405         uint32_t i;
12406         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
12407
12408         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
12409                                                           priv->sh->cdev->pdn,
12410                                                           log_obj_size);
12411         if (!obj) {
12412                 rte_errno = ENODATA;
12413                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
12414                 return NULL;
12415         }
12416         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12417         if (!pool) {
12418                 rte_errno = ENOMEM;
12419                 claim_zero(mlx5_devx_cmd_destroy(obj));
12420                 return NULL;
12421         }
12422         pool->devx_obj = obj;
12423         pool->index = mng->next;
12424         /* Resize pools array if there is no room for the new pool in it. */
12425         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
12426                 claim_zero(mlx5_devx_cmd_destroy(obj));
12427                 mlx5_free(pool);
12428                 return NULL;
12429         }
12430         mng->pools[pool->index] = pool;
12431         mng->next++;
12432         /* Assign the first action in the new pool, the rest go to free list. */
12433         *ct_free = &pool->actions[0];
12434         /* The caller holds the free-list lock; the list operation is safe here. */
12435         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
12436                 /* refcnt is 0 when allocating the memory. */
12437                 pool->actions[i].offset = i;
12438                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
12439         }
12440         return pool;
12441 }
12442
12443 /**
12444  * Allocate an ASO CT action from the free list.
12445  *
12446  * @param[in] dev
12447  *   Pointer to the Ethernet device structure.
12448  * @param[out] error
12449  *   Pointer to the error structure.
12450  *
12451  * @return
12452  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
12453  */
12454 static uint32_t
12455 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12456 {
12457         struct mlx5_priv *priv = dev->data->dev_private;
12458         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12459         struct mlx5_aso_ct_action *ct = NULL;
12460         struct mlx5_aso_ct_pool *pool;
12461         uint8_t reg_c;
12462         uint32_t ct_idx;
12463
12464         MLX5_ASSERT(mng);
12465         if (!priv->sh->devx) {
12466                 rte_errno = ENOTSUP;
12467                 return 0;
12468         }
12469         /* Get a free CT action; if none is available, create a new pool. */
12470         rte_spinlock_lock(&mng->ct_sl);
12471         ct = LIST_FIRST(&mng->free_cts);
12472         if (ct) {
12473                 LIST_REMOVE(ct, next);
12474         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
12475                 rte_spinlock_unlock(&mng->ct_sl);
12476                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12477                                    NULL, "failed to create ASO CT pool");
12478                 return 0;
12479         }
12480         rte_spinlock_unlock(&mng->ct_sl);
12481         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
12482         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
12483         /* 0: inactive, 1: created, 2+: used by flows. */
12484         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
12485         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
12486         if (!ct->dr_action_orig) {
12487 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12488                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
12489                         (priv->sh->rx_domain, pool->devx_obj->obj,
12490                          ct->offset,
12491                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
12492                          reg_c - REG_C_0);
12493 #else
12494                 RTE_SET_USED(reg_c);
12495 #endif
12496                 if (!ct->dr_action_orig) {
12497                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12498                         rte_flow_error_set(error, rte_errno,
12499                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12500                                            "failed to create ASO CT action");
12501                         return 0;
12502                 }
12503         }
12504         if (!ct->dr_action_rply) {
12505 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12506                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
12507                         (priv->sh->rx_domain, pool->devx_obj->obj,
12508                          ct->offset,
12509                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
12510                          reg_c - REG_C_0);
12511 #endif
12512                 if (!ct->dr_action_rply) {
12513                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12514                         rte_flow_error_set(error, rte_errno,
12515                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12516                                            "failed to create ASO CT action");
12517                         return 0;
12518                 }
12519         }
12520         return ct_idx;
12521 }
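
/*
 * Note on the returned index (derived from the code above): the pool index
 * and in-pool offset are packed by MLX5_MAKE_CT_IDX() into a single index,
 * with 0 reserved as the error return.
 */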
12522
12523 /**
12524  * Create a conntrack object with context and actions by using ASO mechanism.
12525  *
12526  * @param[in] dev
12527  *   Pointer to rte_eth_dev structure.
12528  * @param[in] pro
12529  *   Pointer to conntrack information profile.
12530  * @param[out] error
12531  *   Pointer to the error structure.
12532  *
12533  * @return
12534  *   Index to conntrack object on success, 0 otherwise.
12535  */
12536 static uint32_t
12537 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12538                                    const struct rte_flow_action_conntrack *pro,
12539                                    struct rte_flow_error *error)
12540 {
12541         struct mlx5_priv *priv = dev->data->dev_private;
12542         struct mlx5_dev_ctx_shared *sh = priv->sh;
12543         struct mlx5_aso_ct_action *ct;
12544         uint32_t idx;
12545
12546         if (!sh->ct_aso_en)
12547                 return rte_flow_error_set(error, ENOTSUP,
12548                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12549                                           "Connection is not supported");
12550         idx = flow_dv_aso_ct_alloc(dev, error);
12551         if (!idx)
12552                 return rte_flow_error_set(error, rte_errno,
12553                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12554                                           "Failed to allocate CT object");
12555         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12556         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12557                 return rte_flow_error_set(error, EBUSY,
12558                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12559                                           "Failed to update CT");
12560         ct->is_original = !!pro->is_original_dir;
12561         ct->peer = pro->peer_port;
12562         return idx;
12563 }
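
/*
 * Illustrative sketch (not part of the driver): creating the indirect
 * conntrack action that reaches this translation path. Field values are
 * hypothetical.
 */
#if 0 /* example only */
static struct rte_flow_action_handle *
ct_handle_example(uint16_t port_id, uint16_t peer, struct rte_flow_error *err)
{
	struct rte_flow_action_conntrack profile = {
		.peer_port = peer,
		.is_original_dir = 1,
		.enable = 1,
	};
	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
	struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_CONNTRACK,
		.conf = &profile,
	};
	return rte_flow_action_handle_create(port_id, &conf, &action, err);
}
#endif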
12564
12565 /**
12566  * Fill the flow with DV spec, lock free
12567  * (the mutex must be acquired by the caller).
12568  *
12569  * @param[in] dev
12570  *   Pointer to rte_eth_dev structure.
12571  * @param[in, out] dev_flow
12572  *   Pointer to the sub flow.
12573  * @param[in] attr
12574  *   Pointer to the flow attributes.
12575  * @param[in] items
12576  *   Pointer to the list of items.
12577  * @param[in] actions
12578  *   Pointer to the list of actions.
12579  * @param[out] error
12580  *   Pointer to the error structure.
12581  *
12582  * @return
12583  *   0 on success, a negative errno value otherwise and rte_errno is set.
12584  */
12585 static int
12586 flow_dv_translate(struct rte_eth_dev *dev,
12587                   struct mlx5_flow *dev_flow,
12588                   const struct rte_flow_attr *attr,
12589                   const struct rte_flow_item items[],
12590                   const struct rte_flow_action actions[],
12591                   struct rte_flow_error *error)
12592 {
12593         struct mlx5_priv *priv = dev->data->dev_private;
12594         struct mlx5_dev_config *dev_conf = &priv->config;
12595         struct rte_flow *flow = dev_flow->flow;
12596         struct mlx5_flow_handle *handle = dev_flow->handle;
12597         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12598         struct mlx5_flow_rss_desc *rss_desc;
12599         uint64_t item_flags = 0;
12600         uint64_t last_item = 0;
12601         uint64_t action_flags = 0;
12602         struct mlx5_flow_dv_matcher matcher = {
12603                 .mask = {
12604                         .size = sizeof(matcher.mask.buf),
12605                 },
12606         };
12607         int actions_n = 0;
12608         bool actions_end = false;
12609         union {
12610                 struct mlx5_flow_dv_modify_hdr_resource res;
12611                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12612                             sizeof(struct mlx5_modification_cmd) *
12613                             (MLX5_MAX_MODIFY_NUM + 1)];
12614         } mhdr_dummy;
12615         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12616         const struct rte_flow_action_count *count = NULL;
12617         const struct rte_flow_action_age *non_shared_age = NULL;
12618         union flow_dv_attr flow_attr = { .attr = 0 };
12619         uint32_t tag_be;
12620         union mlx5_flow_tbl_key tbl_key;
12621         uint32_t modify_action_position = UINT32_MAX;
12622         void *match_mask = matcher.mask.buf;
12623         void *match_value = dev_flow->dv.value.buf;
12624         uint8_t next_protocol = 0xff;
12625         struct rte_vlan_hdr vlan = { 0 };
12626         struct mlx5_flow_dv_dest_array_resource mdest_res;
12627         struct mlx5_flow_dv_sample_resource sample_res;
12628         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12629         const struct rte_flow_action_sample *sample = NULL;
12630         struct mlx5_flow_sub_actions_list *sample_act;
12631         uint32_t sample_act_pos = UINT32_MAX;
12632         uint32_t age_act_pos = UINT32_MAX;
12633         uint32_t num_of_dest = 0;
12634         int tmp_actions_n = 0;
12635         uint32_t table;
12636         int ret = 0;
12637         const struct mlx5_flow_tunnel *tunnel = NULL;
12638         struct flow_grp_info grp_info = {
12639                 .external = !!dev_flow->external,
12640                 .transfer = !!attr->transfer,
12641                 .fdb_def_rule = !!priv->fdb_def_rule,
12642                 .skip_scale = dev_flow->skip_scale &
12643                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12644                 .std_tbl_fix = true,
12645         };
12646         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
12647
12648         if (!wks)
12649                 return rte_flow_error_set(error, ENOMEM,
12650                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12651                                           NULL,
12652                                           "failed to push flow workspace");
12653         rss_desc = &wks->rss_desc;
12654         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12655         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12656         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12657                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12658         /* Update the normal path action resource into the last index of the array. */
12659         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12660         if (is_tunnel_offload_active(dev)) {
12661                 if (dev_flow->tunnel) {
12662                         RTE_VERIFY(dev_flow->tof_type ==
12663                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12664                         tunnel = dev_flow->tunnel;
12665                 } else {
12666                         tunnel = mlx5_get_tof(items, actions,
12667                                               &dev_flow->tof_type);
12668                         dev_flow->tunnel = tunnel;
12669                 }
12670                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12671                                         (dev, attr, tunnel, dev_flow->tof_type);
12672         }
12675         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12676                                        &grp_info, error);
12677         if (ret)
12678                 return ret;
12679         dev_flow->dv.group = table;
12680         if (attr->transfer)
12681                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12682         /* The number of actions must be reset to 0 in case of a dirty stack. */
12683         mhdr_res->actions_num = 0;
12684         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12685                 /*
12686                  * Do not add a decap action if the match rule drops the
12687                  * packet: HW rejects rules with both decap and drop.
12688                  *
12689                  * If the tunnel match rule was inserted before the matching
12690                  * tunnel set rule, the flow table used in the match rule
12691                  * must be registered. The current implementation handles
12692                  * that in flow_dv_match_register() at the end of the function.
12693                  */
12694                 bool add_decap = true;
12695                 const struct rte_flow_action *ptr = actions;
12696
12697                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12698                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12699                                 add_decap = false;
12700                                 break;
12701                         }
12702                 }
12703                 if (add_decap) {
12704                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12705                                                            attr->transfer,
12706                                                            error))
12707                                 return -rte_errno;
12708                         dev_flow->dv.actions[actions_n++] =
12709                                         dev_flow->dv.encap_decap->action;
12710                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12711                 }
12712         }
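
        /*
         * Translate every action into its DV counterpart below. Modify-header
         * style actions (SET_TAG, SET_META, MARK in extended metadata mode, ...)
         * only accumulate commands in mhdr_res here and are applied as a single
         * DV action once the whole list has been scanned.
         */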
12713         for (; !actions_end ; actions++) {
12714                 const struct rte_flow_action_queue *queue;
12715                 const struct rte_flow_action_rss *rss;
12716                 const struct rte_flow_action *action = actions;
12717                 const uint8_t *rss_key;
12718                 struct mlx5_flow_tbl_resource *tbl;
12719                 struct mlx5_aso_age_action *age_act;
12720                 struct mlx5_flow_counter *cnt_act;
12721                 uint32_t port_id = 0;
12722                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12723                 int action_type = actions->type;
12724                 const struct rte_flow_action *found_action = NULL;
12725                 uint32_t jump_group = 0;
12726                 uint32_t owner_idx;
12727                 struct mlx5_aso_ct_action *ct;
12728
12729                 if (!mlx5_flow_os_action_supported(action_type))
12730                         return rte_flow_error_set(error, ENOTSUP,
12731                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12732                                                   actions,
12733                                                   "action not supported");
12734                 switch (action_type) {
12735                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12736                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12737                         break;
12738                 case RTE_FLOW_ACTION_TYPE_VOID:
12739                         break;
12740                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12741                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
12742                         if (flow_dv_translate_action_port_id(dev, action,
12743                                                              &port_id, error))
12744                                 return -rte_errno;
12745                         port_id_resource.port_id = port_id;
12746                         MLX5_ASSERT(!handle->rix_port_id_action);
12747                         if (flow_dv_port_id_action_resource_register
12748                             (dev, &port_id_resource, dev_flow, error))
12749                                 return -rte_errno;
12750                         dev_flow->dv.actions[actions_n++] =
12751                                         dev_flow->dv.port_id_action->action;
12752                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12753                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12754                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12755                         num_of_dest++;
12756                         break;
12757                 case RTE_FLOW_ACTION_TYPE_FLAG:
12758                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12759                         dev_flow->handle->mark = 1;
12760                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12761                                 struct rte_flow_action_mark mark = {
12762                                         .id = MLX5_FLOW_MARK_DEFAULT,
12763                                 };
12764
12765                                 if (flow_dv_convert_action_mark(dev, &mark,
12766                                                                 mhdr_res,
12767                                                                 error))
12768                                         return -rte_errno;
12769                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12770                                 break;
12771                         }
12772                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12773                         /*
12774                          * Only one FLAG or MARK is supported per device flow
12775                          * right now. So the pointer to the tag resource must be
12776                          * zero before the register process.
12777                          */
12778                         MLX5_ASSERT(!handle->dvh.rix_tag);
12779                         if (flow_dv_tag_resource_register(dev, tag_be,
12780                                                           dev_flow, error))
12781                                 return -rte_errno;
12782                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12783                         dev_flow->dv.actions[actions_n++] =
12784                                         dev_flow->dv.tag_resource->action;
12785                         break;
12786                 case RTE_FLOW_ACTION_TYPE_MARK:
12787                         action_flags |= MLX5_FLOW_ACTION_MARK;
12788                         dev_flow->handle->mark = 1;
12789                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12790                                 const struct rte_flow_action_mark *mark =
12791                                         (const struct rte_flow_action_mark *)
12792                                                 actions->conf;
12793
12794                                 if (flow_dv_convert_action_mark(dev, mark,
12795                                                                 mhdr_res,
12796                                                                 error))
12797                                         return -rte_errno;
12798                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12799                                 break;
12800                         }
12801                         /* Fall-through */
12802                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12803                         /* Legacy (non-extensive) MARK action. */
12804                         tag_be = mlx5_flow_mark_set
12805                               (((const struct rte_flow_action_mark *)
12806                                (actions->conf))->id);
12807                         MLX5_ASSERT(!handle->dvh.rix_tag);
12808                         if (flow_dv_tag_resource_register(dev, tag_be,
12809                                                           dev_flow, error))
12810                                 return -rte_errno;
12811                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12812                         dev_flow->dv.actions[actions_n++] =
12813                                         dev_flow->dv.tag_resource->action;
12814                         break;
12815                 case RTE_FLOW_ACTION_TYPE_SET_META:
12816                         if (flow_dv_convert_action_set_meta
12817                                 (dev, mhdr_res, attr,
12818                                  (const struct rte_flow_action_set_meta *)
12819                                   actions->conf, error))
12820                                 return -rte_errno;
12821                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12822                         break;
12823                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12824                         if (flow_dv_convert_action_set_tag
12825                                 (dev, mhdr_res,
12826                                  (const struct rte_flow_action_set_tag *)
12827                                   actions->conf, error))
12828                                 return -rte_errno;
12829                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12830                         break;
12831                 case RTE_FLOW_ACTION_TYPE_DROP:
12832                         action_flags |= MLX5_FLOW_ACTION_DROP;
12833                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12834                         break;
12835                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12836                         queue = actions->conf;
12837                         rss_desc->queue_num = 1;
12838                         rss_desc->queue[0] = queue->index;
12839                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12840                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12841                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12842                         num_of_dest++;
12843                         break;
12844                 case RTE_FLOW_ACTION_TYPE_RSS:
12845                         rss = actions->conf;
12846                         memcpy(rss_desc->queue, rss->queue,
12847                                rss->queue_num * sizeof(uint16_t));
12848                         rss_desc->queue_num = rss->queue_num;
12849                         /* NULL RSS key indicates default RSS key. */
12850                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12851                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12852                         /*
12853                          * rss->level and rss->types should be set in advance
12854                          * when expanding items for RSS.
12855                          */
12856                         action_flags |= MLX5_FLOW_ACTION_RSS;
12857                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12858                                 MLX5_FLOW_FATE_SHARED_RSS :
12859                                 MLX5_FLOW_FATE_QUEUE;
12860                         break;
12861                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12862                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12863                         age_act = flow_aso_age_get_by_idx(dev, owner_idx);
12864                         if (flow->age == 0) {
12865                                 flow->age = owner_idx;
12866                                 __atomic_fetch_add(&age_act->refcnt, 1,
12867                                                    __ATOMIC_RELAXED);
12868                         }
12869                         age_act_pos = actions_n++;
12870                         action_flags |= MLX5_FLOW_ACTION_AGE;
12871                         break;
12872                 case RTE_FLOW_ACTION_TYPE_AGE:
12873                         non_shared_age = action->conf;
12874                         age_act_pos = actions_n++;
12875                         action_flags |= MLX5_FLOW_ACTION_AGE;
12876                         break;
12877                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12878                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12879                         cnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,
12880                                                              NULL);
12881                         MLX5_ASSERT(cnt_act != NULL);
12882                         /*
12883                          * When creating a meter drop flow in the drop table,
12884                          * the counter must not overwrite the rte_flow counter.
12885                          */
12886                         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
12887                             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
12888                                 dev_flow->dv.actions[actions_n++] =
12889                                                         cnt_act->action;
12890                         } else {
12891                                 if (flow->counter == 0) {
12892                                         flow->counter = owner_idx;
12893                                         __atomic_fetch_add
12894                                                 (&cnt_act->shared_info.refcnt,
12895                                                  1, __ATOMIC_RELAXED);
12896                                 }
12897                                 /* Save information first, will apply later. */
12898                                 action_flags |= MLX5_FLOW_ACTION_COUNT;
12899                         }
12900                         break;
12901                 case RTE_FLOW_ACTION_TYPE_COUNT:
12902                         if (!priv->sh->devx) {
12903                                 return rte_flow_error_set
12904                                               (error, ENOTSUP,
12905                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12906                                                NULL,
12907                                                "count action not supported");
12908                         }
12909                         /* Save information first, will apply later. */
12910                         count = action->conf;
12911                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12912                         break;
12913                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12914                         dev_flow->dv.actions[actions_n++] =
12915                                                 priv->sh->pop_vlan_action;
12916                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12917                         break;
12918                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
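                              /*
                               * Fold any subsequent OF_SET_VLAN_VID/OF_SET_VLAN_PCP
                               * actions into the VLAN header before pushing it.
                               */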
12919                         if (!(action_flags &
12920                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12921                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12922                         vlan.eth_proto = rte_be_to_cpu_16
12923                              ((((const struct rte_flow_action_of_push_vlan *)
12924                                                    actions->conf)->ethertype));
12925                         found_action = mlx5_flow_find_action
12926                                         (actions + 1,
12927                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12928                         if (found_action)
12929                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12930                         found_action = mlx5_flow_find_action
12931                                         (actions + 1,
12932                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12933                         if (found_action)
12934                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12935                         if (flow_dv_create_action_push_vlan
12936                                             (dev, attr, &vlan, dev_flow, error))
12937                                 return -rte_errno;
12938                         dev_flow->dv.actions[actions_n++] =
12939                                         dev_flow->dv.push_vlan_res->action;
12940                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12941                         break;
12942                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12943                         /* The OF_PUSH_VLAN action has already handled this action. */
12944                         MLX5_ASSERT(action_flags &
12945                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12946                         break;
12947                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12948                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12949                                 break;
12950                         flow_dev_get_vlan_info_from_items(items, &vlan);
12951                         mlx5_update_vlan_vid_pcp(actions, &vlan);
12952                         /* Without a VLAN push, this is a modify header action. */
12953                         if (flow_dv_convert_action_modify_vlan_vid
12954                                                 (mhdr_res, actions, error))
12955                                 return -rte_errno;
12956                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
12957                         break;
12958                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12959                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12960                         if (flow_dv_create_action_l2_encap(dev, actions,
12961                                                            dev_flow,
12962                                                            attr->transfer,
12963                                                            error))
12964                                 return -rte_errno;
12965                         dev_flow->dv.actions[actions_n++] =
12966                                         dev_flow->dv.encap_decap->action;
12967                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12968                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12969                                 sample_act->action_flags |=
12970                                                         MLX5_FLOW_ACTION_ENCAP;
12971                         break;
12972                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
12973                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
12974                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12975                                                            attr->transfer,
12976                                                            error))
12977                                 return -rte_errno;
12978                         dev_flow->dv.actions[actions_n++] =
12979                                         dev_flow->dv.encap_decap->action;
12980                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12981                         break;
12982                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12983                         /* Handle encap with preceding decap. */
12984                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
12985                                 if (flow_dv_create_action_raw_encap
12986                                         (dev, actions, dev_flow, attr, error))
12987                                         return -rte_errno;
12988                                 dev_flow->dv.actions[actions_n++] =
12989                                         dev_flow->dv.encap_decap->action;
12990                         } else {
12991                                 /* Handle encap without preceding decap. */
12992                                 if (flow_dv_create_action_l2_encap
12993                                     (dev, actions, dev_flow, attr->transfer,
12994                                      error))
12995                                         return -rte_errno;
12996                                 dev_flow->dv.actions[actions_n++] =
12997                                         dev_flow->dv.encap_decap->action;
12998                         }
12999                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
13000                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13001                                 sample_act->action_flags |=
13002                                                         MLX5_FLOW_ACTION_ENCAP;
13003                         break;
13004                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
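                              /*
                               * Look ahead past VOID actions to check whether this
                               * decap is immediately followed by a raw encap; if so,
                               * both are handled together in the RAW_ENCAP case.
                               */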
13005                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
13006                                 ;
13007                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
13008                                 if (flow_dv_create_action_l2_decap
13009                                     (dev, dev_flow, attr->transfer, error))
13010                                         return -rte_errno;
13011                                 dev_flow->dv.actions[actions_n++] =
13012                                         dev_flow->dv.encap_decap->action;
13013                         }
13014                         /* If decap is followed by encap, handle it at encap. */
13015                         action_flags |= MLX5_FLOW_ACTION_DECAP;
13016                         break;
13017                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
13018                         dev_flow->dv.actions[actions_n++] =
13019                                 (void *)(uintptr_t)action->conf;
13020                         action_flags |= MLX5_FLOW_ACTION_JUMP;
13021                         break;
13022                 case RTE_FLOW_ACTION_TYPE_JUMP:
13023                         jump_group = ((const struct rte_flow_action_jump *)
13024                                                         action->conf)->group;
13025                         grp_info.std_tbl_fix = 0;
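                              /*
                               * Translate the target group to the internal table id,
                               * honoring the skip-scale hint carried in the dev_flow.
                               */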
13026                         if (dev_flow->skip_scale &
13027                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
13028                                 grp_info.skip_scale = 1;
13029                         else
13030                                 grp_info.skip_scale = 0;
13031                         ret = mlx5_flow_group_to_table(dev, tunnel,
13032                                                        jump_group,
13033                                                        &table,
13034                                                        &grp_info, error);
13035                         if (ret)
13036                                 return ret;
13037                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
13038                                                        attr->transfer,
13039                                                        !!dev_flow->external,
13040                                                        tunnel, jump_group, 0,
13041                                                        0, error);
13042                         if (!tbl)
13043                                 return rte_flow_error_set
13044                                                 (error, errno,
13045                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13046                                                  NULL,
13047                                                  "cannot create jump action.");
13048                         if (flow_dv_jump_tbl_resource_register
13049                             (dev, tbl, dev_flow, error)) {
13050                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
13051                                 return rte_flow_error_set
13052                                                 (error, errno,
13053                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13054                                                  NULL,
13055                                                  "cannot create jump action.");
13056                         }
13057                         dev_flow->dv.actions[actions_n++] =
13058                                         dev_flow->dv.jump->action;
13059                         action_flags |= MLX5_FLOW_ACTION_JUMP;
13060                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
13061                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
13062                         num_of_dest++;
13063                         break;
13064                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
13065                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
13066                         if (flow_dv_convert_action_modify_mac
13067                                         (mhdr_res, actions, error))
13068                                 return -rte_errno;
13069                         action_flags |= actions->type ==
13070                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
13071                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
13072                                         MLX5_FLOW_ACTION_SET_MAC_DST;
13073                         break;
13074                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
13075                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
13076                         if (flow_dv_convert_action_modify_ipv4
13077                                         (mhdr_res, actions, error))
13078                                 return -rte_errno;
13079                         action_flags |= actions->type ==
13080                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
13081                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
13082                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
13083                         break;
13084                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
13085                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
13086                         if (flow_dv_convert_action_modify_ipv6
13087                                         (mhdr_res, actions, error))
13088                                 return -rte_errno;
13089                         action_flags |= actions->type ==
13090                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
13091                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
13092                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
13093                         break;
13094                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
13095                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
13096                         if (flow_dv_convert_action_modify_tp
13097                                         (mhdr_res, actions, items,
13098                                          &flow_attr, dev_flow, !!(action_flags &
13099                                          MLX5_FLOW_ACTION_DECAP), error))
13100                                 return -rte_errno;
13101                         action_flags |= actions->type ==
13102                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
13103                                         MLX5_FLOW_ACTION_SET_TP_SRC :
13104                                         MLX5_FLOW_ACTION_SET_TP_DST;
13105                         break;
13106                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
13107                         if (flow_dv_convert_action_modify_dec_ttl
13108                                         (mhdr_res, items, &flow_attr, dev_flow,
13109                                          !!(action_flags &
13110                                          MLX5_FLOW_ACTION_DECAP), error))
13111                                 return -rte_errno;
13112                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
13113                         break;
13114                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
13115                         if (flow_dv_convert_action_modify_ttl
13116                                         (mhdr_res, actions, items, &flow_attr,
13117                                          dev_flow, !!(action_flags &
13118                                          MLX5_FLOW_ACTION_DECAP), error))
13119                                 return -rte_errno;
13120                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
13121                         break;
13122                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
13123                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
13124                         if (flow_dv_convert_action_modify_tcp_seq
13125                                         (mhdr_res, actions, error))
13126                                 return -rte_errno;
13127                         action_flags |= actions->type ==
13128                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
13129                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
13130                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
13131                         break;
13132
13133                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
13134                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
13135                         if (flow_dv_convert_action_modify_tcp_ack
13136                                         (mhdr_res, actions, error))
13137                                 return -rte_errno;
13138                         action_flags |= actions->type ==
13139                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
13140                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
13141                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
13142                         break;
13143                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
13144                         if (flow_dv_convert_action_set_reg
13145                                         (mhdr_res, actions, error))
13146                                 return -rte_errno;
13147                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13148                         break;
13149                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
13150                         if (flow_dv_convert_action_copy_mreg
13151                                         (dev, mhdr_res, actions, error))
13152                                 return -rte_errno;
13153                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13154                         break;
13155                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
13156                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
13157                         dev_flow->handle->fate_action =
13158                                         MLX5_FLOW_FATE_DEFAULT_MISS;
13159                         break;
13160                 case RTE_FLOW_ACTION_TYPE_METER:
13161                         if (!wks->fm)
13162                                 return rte_flow_error_set(error, rte_errno,
13163                                         RTE_FLOW_ERROR_TYPE_ACTION,
13164                                         NULL, "Failed to get meter in flow.");
13165                         /* Set the meter action. */
13166                         dev_flow->dv.actions[actions_n++] =
13167                                 wks->fm->meter_action;
13168                         action_flags |= MLX5_FLOW_ACTION_METER;
13169                         break;
13170                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
13171                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
13172                                                               actions, error))
13173                                 return -rte_errno;
13174                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
13175                         break;
13176                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
13177                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
13178                                                               actions, error))
13179                                 return -rte_errno;
13180                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
13181                         break;
13182                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
13183                         sample_act_pos = actions_n;
13184                         sample = (const struct rte_flow_action_sample *)
13185                                  action->conf;
13186                         actions_n++;
13187                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
13188                         /* Put the encap action into the group when combined with port id. */
13189                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
13190                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
13191                                 sample_act->action_flags |=
13192                                                         MLX5_FLOW_ACTION_ENCAP;
13193                         break;
13194                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13195                         if (flow_dv_convert_action_modify_field
13196                                         (dev, mhdr_res, actions, attr, error))
13197                                 return -rte_errno;
13198                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
13199                         break;
13200                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
13201                         owner_idx = (uint32_t)(uintptr_t)action->conf;
13202                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
13203                         if (!ct)
13204                                 return rte_flow_error_set(error, EINVAL,
13205                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13206                                                 NULL,
13207                                                 "Failed to get CT object.");
13208                         if (mlx5_aso_ct_available(priv->sh, ct))
13209                                 return rte_flow_error_set(error, rte_errno,
13210                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13211                                                 NULL,
13212                                                 "CT is unavailable.");
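                              /*
                               * Pick the DR action that matches the connection
                               * direction (original or reply) this CT object tracks.
                               */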
13213                         if (ct->is_original)
13214                                 dev_flow->dv.actions[actions_n] =
13215                                                         ct->dr_action_orig;
13216                         else
13217                                 dev_flow->dv.actions[actions_n] =
13218                                                         ct->dr_action_rply;
13219                         if (flow->ct == 0) {
13220                                 flow->indirect_type =
13221                                                 MLX5_INDIRECT_ACTION_TYPE_CT;
13222                                 flow->ct = owner_idx;
13223                                 __atomic_fetch_add(&ct->refcnt, 1,
13224                                                    __ATOMIC_RELAXED);
13225                         }
13226                         actions_n++;
13227                         action_flags |= MLX5_FLOW_ACTION_CT;
13228                         break;
13229                 case RTE_FLOW_ACTION_TYPE_END:
13230                         actions_end = true;
13231                         if (mhdr_res->actions_num) {
13232                                 /* Create the modify header action if needed. */
13233                                 if (flow_dv_modify_hdr_resource_register
13234                                         (dev, mhdr_res, dev_flow, error))
13235                                         return -rte_errno;
13236                                 dev_flow->dv.actions[modify_action_position] =
13237                                         handle->dvh.modify_hdr->action;
13238                         }
13239                         /*
13240                          * Handle the AGE and COUNT actions by a single HW
13241                          * counter when they are not shared.
13242                          */
13243                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
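                                      /*
                                       * Fall back to a counter-based age when a
                                       * non-shared AGE is combined with COUNT (one
                                       * counter serves both) or when ASO flow-hit
                                       * aging cannot be used for this table.
                                       */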
13244                                 if ((non_shared_age && count) ||
13245                                     !(priv->sh->flow_hit_aso_en &&
13246                                       (attr->group || attr->transfer))) {
13247                                         /* Create the age action by counters. */
13248                                         cnt_act = flow_dv_prepare_counter
13249                                                                 (dev, dev_flow,
13250                                                                  flow, count,
13251                                                                  non_shared_age,
13252                                                                  error);
13253                                         if (!cnt_act)
13254                                                 return -rte_errno;
13255                                         dev_flow->dv.actions[age_act_pos] =
13256                                                                 cnt_act->action;
13257                                         break;
13258                                 }
13259                                 if (!flow->age && non_shared_age) {
13260                                         flow->age = flow_dv_aso_age_alloc
13261                                                                 (dev, error);
13262                                         if (!flow->age)
13263                                                 return -rte_errno;
13264                                         flow_dv_aso_age_params_init
13265                                                     (dev, flow->age,
13266                                                      non_shared_age->context ?
13267                                                      non_shared_age->context :
13268                                                      (void *)(uintptr_t)
13269                                                      (dev_flow->flow_idx),
13270                                                      non_shared_age->timeout);
13271                                 }
13272                                 age_act = flow_aso_age_get_by_idx(dev,
13273                                                                   flow->age);
13274                                 dev_flow->dv.actions[age_act_pos] =
13275                                                              age_act->dr_action;
13276                         }
13277                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13278                                 /*
13279                                  * Create one count action, to be used
13280                                  * by all sub-flows.
13281                                  */
13282                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
13283                                                                   flow, count,
13284                                                                   NULL, error);
13285                                 if (!cnt_act)
13286                                         return -rte_errno;
13287                                 dev_flow->dv.actions[actions_n++] =
13288                                                                 cnt_act->action;
13289                         }
13290                 default:
13291                         break;
13292                 }
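                      /*
                       * Reserve one slot for the modify header action the first
                       * time any header rewrite is accumulated; the actual action
                       * is created at RTE_FLOW_ACTION_TYPE_END.
                       */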
13293                 if (mhdr_res->actions_num &&
13294                     modify_action_position == UINT32_MAX)
13295                         modify_action_position = actions_n++;
13296         }
13297         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
13298                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
13299                 int item_type = items->type;
13300
13301                 if (!mlx5_flow_os_item_supported(item_type))
13302                         return rte_flow_error_set(error, ENOTSUP,
13303                                                   RTE_FLOW_ERROR_TYPE_ITEM,
13304                                                   NULL, "item not supported");
13305                 switch (item_type) {
13306                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
13307                         flow_dv_translate_item_port_id
13308                                 (dev, match_mask, match_value, items, attr);
13309                         last_item = MLX5_FLOW_ITEM_PORT_ID;
13310                         break;
13311                 case RTE_FLOW_ITEM_TYPE_ETH:
13312                         flow_dv_translate_item_eth(match_mask, match_value,
13313                                                    items, tunnel,
13314                                                    dev_flow->dv.group);
13315                         matcher.priority = action_flags &
13316                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
13317                                         !dev_flow->external ?
13318                                         MLX5_PRIORITY_MAP_L3 :
13319                                         MLX5_PRIORITY_MAP_L2;
13320                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13321                                              MLX5_FLOW_LAYER_OUTER_L2;
13322                         break;
13323                 case RTE_FLOW_ITEM_TYPE_VLAN:
13324                         flow_dv_translate_item_vlan(dev_flow,
13325                                                     match_mask, match_value,
13326                                                     items, tunnel,
13327                                                     dev_flow->dv.group);
13328                         matcher.priority = MLX5_PRIORITY_MAP_L2;
13329                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13330                                               MLX5_FLOW_LAYER_INNER_VLAN) :
13331                                              (MLX5_FLOW_LAYER_OUTER_L2 |
13332                                               MLX5_FLOW_LAYER_OUTER_VLAN);
13333                         break;
13334                 case RTE_FLOW_ITEM_TYPE_IPV4:
13335                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13336                                                   &item_flags, &tunnel);
13337                         flow_dv_translate_item_ipv4(match_mask, match_value,
13338                                                     items, tunnel,
13339                                                     dev_flow->dv.group);
13340                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13341                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13342                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
13343                         if (items->mask != NULL &&
13344                             ((const struct rte_flow_item_ipv4 *)
13345                              items->mask)->hdr.next_proto_id) {
13346                                 next_protocol =
13347                                         ((const struct rte_flow_item_ipv4 *)
13348                                          (items->spec))->hdr.next_proto_id;
13349                                 next_protocol &=
13350                                         ((const struct rte_flow_item_ipv4 *)
13351                                          (items->mask))->hdr.next_proto_id;
13352                         } else {
13353                                 /* Reset for inner layer. */
13354                                 next_protocol = 0xff;
13355                         }
13356                         break;
13357                 case RTE_FLOW_ITEM_TYPE_IPV6:
13358                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13359                                                   &item_flags, &tunnel);
13360                         flow_dv_translate_item_ipv6(match_mask, match_value,
13361                                                     items, tunnel,
13362                                                     dev_flow->dv.group);
13363                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13364                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13365                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13366                         if (items->mask != NULL &&
13367                             ((const struct rte_flow_item_ipv6 *)
13368                              items->mask)->hdr.proto) {
13369                                 next_protocol =
13370                                         ((const struct rte_flow_item_ipv6 *)
13371                                          items->spec)->hdr.proto;
13372                                 next_protocol &=
13373                                         ((const struct rte_flow_item_ipv6 *)
13374                                          items->mask)->hdr.proto;
13375                         } else {
13376                                 /* Reset for inner layer. */
13377                                 next_protocol = 0xff;
13378                         }
13379                         break;
13380                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13381                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
13382                                                              match_value,
13383                                                              items, tunnel);
13384                         last_item = tunnel ?
13385                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13386                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13387                         if (items->mask != NULL &&
13388                             ((const struct rte_flow_item_ipv6_frag_ext *)
13389                              items->mask)->hdr.next_header) {
13390                                 next_protocol =
13391                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13392                                  items->spec)->hdr.next_header;
13393                                 next_protocol &=
13394                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13395                                  items->mask)->hdr.next_header;
13396                         } else {
13397                                 /* Reset for inner layer. */
13398                                 next_protocol = 0xff;
13399                         }
13400                         break;
13401                 case RTE_FLOW_ITEM_TYPE_TCP:
13402                         flow_dv_translate_item_tcp(match_mask, match_value,
13403                                                    items, tunnel);
13404                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13405                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13406                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
13407                         break;
13408                 case RTE_FLOW_ITEM_TYPE_UDP:
13409                         flow_dv_translate_item_udp(match_mask, match_value,
13410                                                    items, tunnel);
13411                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13412                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13413                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
13414                         break;
13415                 case RTE_FLOW_ITEM_TYPE_GRE:
13416                         flow_dv_translate_item_gre(match_mask, match_value,
13417                                                    items, tunnel);
13418                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13419                         last_item = MLX5_FLOW_LAYER_GRE;
13420                         break;
13421                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13422                         flow_dv_translate_item_gre_key(match_mask,
13423                                                        match_value, items);
13424                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
13425                         break;
13426                 case RTE_FLOW_ITEM_TYPE_NVGRE:
13427                         flow_dv_translate_item_nvgre(match_mask, match_value,
13428                                                      items, tunnel);
13429                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13430                         last_item = MLX5_FLOW_LAYER_GRE;
13431                         break;
13432                 case RTE_FLOW_ITEM_TYPE_VXLAN:
13433                         flow_dv_translate_item_vxlan(dev, attr,
13434                                                      match_mask, match_value,
13435                                                      items, tunnel);
13436                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13437                         last_item = MLX5_FLOW_LAYER_VXLAN;
13438                         break;
13439                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13440                         flow_dv_translate_item_vxlan_gpe(match_mask,
13441                                                          match_value, items,
13442                                                          tunnel);
13443                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13444                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13445                         break;
13446                 case RTE_FLOW_ITEM_TYPE_GENEVE:
13447                         flow_dv_translate_item_geneve(match_mask, match_value,
13448                                                       items, tunnel);
13449                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13450                         last_item = MLX5_FLOW_LAYER_GENEVE;
13451                         break;
13452                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13453                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13454                                                           match_value,
13455                                                           items, error);
13456                         if (ret)
13457                                 return rte_flow_error_set(error, -ret,
13458                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13459                                         "cannot create GENEVE TLV option");
13460                         flow->geneve_tlv_option = 1;
13461                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13462                         break;
13463                 case RTE_FLOW_ITEM_TYPE_MPLS:
13464                         flow_dv_translate_item_mpls(match_mask, match_value,
13465                                                     items, last_item, tunnel);
13466                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13467                         last_item = MLX5_FLOW_LAYER_MPLS;
13468                         break;
13469                 case RTE_FLOW_ITEM_TYPE_MARK:
13470                         flow_dv_translate_item_mark(dev, match_mask,
13471                                                     match_value, items);
13472                         last_item = MLX5_FLOW_ITEM_MARK;
13473                         break;
13474                 case RTE_FLOW_ITEM_TYPE_META:
13475                         flow_dv_translate_item_meta(dev, match_mask,
13476                                                     match_value, attr, items);
13477                         last_item = MLX5_FLOW_ITEM_METADATA;
13478                         break;
13479                 case RTE_FLOW_ITEM_TYPE_ICMP:
13480                         flow_dv_translate_item_icmp(match_mask, match_value,
13481                                                     items, tunnel);
13482                         last_item = MLX5_FLOW_LAYER_ICMP;
13483                         break;
13484                 case RTE_FLOW_ITEM_TYPE_ICMP6:
13485                         flow_dv_translate_item_icmp6(match_mask, match_value,
13486                                                       items, tunnel);
13487                         last_item = MLX5_FLOW_LAYER_ICMP6;
13488                         break;
13489                 case RTE_FLOW_ITEM_TYPE_TAG:
13490                         flow_dv_translate_item_tag(dev, match_mask,
13491                                                    match_value, items);
13492                         last_item = MLX5_FLOW_ITEM_TAG;
13493                         break;
13494                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13495                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
13496                                                         match_value, items);
13497                         last_item = MLX5_FLOW_ITEM_TAG;
13498                         break;
13499                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13500                         flow_dv_translate_item_tx_queue(dev, match_mask,
13501                                                         match_value,
13502                                                         items);
13503                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13504                         break;
13505                 case RTE_FLOW_ITEM_TYPE_GTP:
13506                         flow_dv_translate_item_gtp(match_mask, match_value,
13507                                                    items, tunnel);
13508                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13509                         last_item = MLX5_FLOW_LAYER_GTP;
13510                         break;
13511                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13512                         ret = flow_dv_translate_item_gtp_psc(match_mask,
13513                                                           match_value,
13514                                                           items);
13515                         if (ret)
13516                                 return rte_flow_error_set(error, -ret,
13517                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13518                                         "cannot create GTP PSC item");
13519                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
13520                         break;
13521                 case RTE_FLOW_ITEM_TYPE_ECPRI:
13522                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
13523                                 /* Create the parser only the first time it is used. */
13524                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
13525                                 if (ret)
13526                                         return rte_flow_error_set
13527                                                 (error, -ret,
13528                                                 RTE_FLOW_ERROR_TYPE_ITEM,
13529                                                 NULL,
13530                                                 "cannot create eCPRI parser");
13531                         }
13532                         flow_dv_translate_item_ecpri(dev, match_mask,
13533                                                      match_value, items,
13534                                                      last_item);
13535                         /* No other protocol should follow eCPRI layer. */
13536                         last_item = MLX5_FLOW_LAYER_ECPRI;
13537                         break;
13538                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
13539                         flow_dv_translate_item_integrity(items, integrity_items,
13540                                                          &last_item);
13541                         break;
13542                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
13543                         flow_dv_translate_item_aso_ct(dev, match_mask,
13544                                                       match_value, items);
13545                         break;
13546                 case RTE_FLOW_ITEM_TYPE_FLEX:
13547                         flow_dv_translate_item_flex(dev, match_mask,
13548                                                     match_value, items,
13549                                                     dev_flow, tunnel != 0);
13550                         last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
13551                                     MLX5_FLOW_ITEM_OUTER_FLEX;
13552                         break;
13553                 default:
13554                         break;
13555                 }
13556                 item_flags |= last_item;
13557         }
13558         /*
13559          * When E-Switch mode is enabled, there are two cases where the
13560          * source port must be set manually.
13561          * The first is a NIC steering rule, and the second is an
13562          * E-Switch rule where no port_id item was found. In both cases
13563          * the source port is set according to the current port in use.
13564          */
13565         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
13566             (priv->representor || priv->master)) {
13567                 if (flow_dv_translate_item_port_id(dev, match_mask,
13568                                                    match_value, NULL, attr))
13569                         return -rte_errno;
13570         }
13571         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
13572                 flow_dv_translate_item_integrity_post(match_mask, match_value,
13573                                                       integrity_items,
13574                                                       item_flags);
13575         }
13576 #ifdef RTE_LIBRTE_MLX5_DEBUG
13577         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13578                                               dev_flow->dv.value.buf));
13579 #endif
13580         /*
13581          * Layers may be already initialized from prefix flow if this dev_flow
13582          * is the suffix flow.
13583          */
13584         handle->layers |= item_flags;
13585         if (action_flags & MLX5_FLOW_ACTION_RSS)
13586                 flow_dv_hashfields_set(dev_flow, rss_desc);
13587         /* If the sample action contains an RSS action, the Sample/Mirror
13588          * resource should be registered after the hash fields are updated.
13589          */
13590         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13591                 ret = flow_dv_translate_action_sample(dev,
13592                                                       sample,
13593                                                       dev_flow, attr,
13594                                                       &num_of_dest,
13595                                                       sample_actions,
13596                                                       &sample_res,
13597                                                       error);
13598                 if (ret < 0)
13599                         return ret;
13600                 ret = flow_dv_create_action_sample(dev,
13601                                                    dev_flow,
13602                                                    num_of_dest,
13603                                                    &sample_res,
13604                                                    &mdest_res,
13605                                                    sample_actions,
13606                                                    action_flags,
13607                                                    error);
13608                 if (ret < 0)
13609                         return rte_flow_error_set
13610                                                 (error, rte_errno,
13611                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13612                                                 NULL,
13613                                                 "cannot create sample action");
13614                 if (num_of_dest > 1) {
13615                         dev_flow->dv.actions[sample_act_pos] =
13616                         dev_flow->dv.dest_array_res->action;
13617                 } else {
13618                         dev_flow->dv.actions[sample_act_pos] =
13619                         dev_flow->dv.sample_res->verbs_action;
13620                 }
13621         }
13622         /*
13623          * For multiple destinations (sample action with ratio=1), the encap
13624          * action and the port id action are combined into a group action,
13625          * so remove these original actions from the flow and use the
13626          * sample action instead.
13627          */
13628         if (num_of_dest > 1 &&
13629             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13630                 int i;
13631                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13632
13633                 for (i = 0; i < actions_n; i++) {
13634                         if ((sample_act->dr_encap_action &&
13635                                 sample_act->dr_encap_action ==
13636                                 dev_flow->dv.actions[i]) ||
13637                                 (sample_act->dr_port_id_action &&
13638                                 sample_act->dr_port_id_action ==
13639                                 dev_flow->dv.actions[i]) ||
13640                                 (sample_act->dr_jump_action &&
13641                                 sample_act->dr_jump_action ==
13642                                 dev_flow->dv.actions[i]))
13643                                 continue;
13644                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13645                 }
13646                 memcpy((void *)dev_flow->dv.actions,
13647                                 (void *)temp_actions,
13648                                 tmp_actions_n * sizeof(void *));
13649                 actions_n = tmp_actions_n;
13650         }
13651         dev_flow->dv.actions_n = actions_n;
13652         dev_flow->act_flags = action_flags;
13653         if (wks->skip_matcher_reg)
13654                 return 0;
13655         /* Register matcher. */
13656         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13657                                     matcher.mask.size);
13658         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13659                                                      matcher.priority,
13660                                                      dev_flow->external);
13661         /**
13662          * When creating a meter drop flow in the drop table using the
13663          * original 5-tuple match, the matcher priority should be lower
13664          * than that of the mtr_id matcher.
13665          */
13666         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13667             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
13668             matcher.priority <= MLX5_REG_BITS)
13669                 matcher.priority += MLX5_REG_BITS;
13670         /* The reserved field does not need to be set to 0 here. */
13671         tbl_key.is_fdb = attr->transfer;
13672         tbl_key.is_egress = attr->egress;
13673         tbl_key.level = dev_flow->dv.group;
13674         tbl_key.id = dev_flow->dv.table_id;
13675         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13676                                      tunnel, attr->group, error))
13677                 return -rte_errno;
13678         return 0;
13679 }
13680
13681 /**
13682  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13683  *
13684  * @param[in, out] action
13685  *   Shared RSS action holding hash RX queue objects.
13686  * @param[in] hash_fields
13687  *   Defines combination of packet fields to participate in RX hash.
13688  * @param[in] hrxq_idx
13689  *   Hash RX queue index to set.
13690  *
13691  * @return
13692  *   0 on success, -1 otherwise.
13696  */
13697 static int
13698 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13699                               const uint64_t hash_fields,
13700                               uint32_t hrxq_idx)
13701 {
13702         uint32_t *hrxqs = action->hrxq;
13703
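              /*
               * One hash RX queue slot per hash combination:
               * [0] IPv4, [1] IPv4-TCP, [2] IPv4-UDP, [3] IPv6,
               * [4] IPv6-TCP, [5] IPv6-UDP, [6] no hash.
               */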
13704         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13705         case MLX5_RSS_HASH_IPV4:
13706                 /* fall-through. */
13707         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13708                 /* fall-through. */
13709         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13710                 hrxqs[0] = hrxq_idx;
13711                 return 0;
13712         case MLX5_RSS_HASH_IPV4_TCP:
13713                 /* fall-through. */
13714         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13715                 /* fall-through. */
13716         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13717                 hrxqs[1] = hrxq_idx;
13718                 return 0;
13719         case MLX5_RSS_HASH_IPV4_UDP:
13720                 /* fall-through. */
13721         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13722                 /* fall-through. */
13723         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13724                 hrxqs[2] = hrxq_idx;
13725                 return 0;
13726         case MLX5_RSS_HASH_IPV6:
13727                 /* fall-through. */
13728         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13729                 /* fall-through. */
13730         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13731                 hrxqs[3] = hrxq_idx;
13732                 return 0;
13733         case MLX5_RSS_HASH_IPV6_TCP:
13734                 /* fall-through. */
13735         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13736                 /* fall-through. */
13737         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13738                 hrxqs[4] = hrxq_idx;
13739                 return 0;
13740         case MLX5_RSS_HASH_IPV6_UDP:
13741                 /* fall-through. */
13742         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13743                 /* fall-through. */
13744         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13745                 hrxqs[5] = hrxq_idx;
13746                 return 0;
13747         case MLX5_RSS_HASH_NONE:
13748                 hrxqs[6] = hrxq_idx;
13749                 return 0;
13750         default:
13751                 return -1;
13752         }
13753 }
13754
13755 /**
13756  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13757  *
13758  * @param[in] dev
13759  *   Pointer to the Ethernet device structure.
13760  * @param[in] idx
13761  *   Shared RSS action ID holding hash RX queue objects.
13762  * @param[in] hash_fields
13763  *   Defines combination of packet fields to participate in RX hash.
13767  *
13768  * @return
13769  *   Valid hash RX queue index, otherwise 0.
13770  */
13771 static uint32_t
13772 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13773                                  const uint64_t hash_fields)
13774 {
13775         struct mlx5_priv *priv = dev->data->dev_private;
13776         struct mlx5_shared_action_rss *shared_rss =
13777             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13778         const uint32_t *hrxqs = shared_rss->hrxq;
13779
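              /* Same slot layout as __flow_dv_action_rss_hrxq_set(). */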
13780         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13781         case MLX5_RSS_HASH_IPV4:
13782                 /* fall-through. */
13783         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13784                 /* fall-through. */
13785         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13786                 return hrxqs[0];
13787         case MLX5_RSS_HASH_IPV4_TCP:
13788                 /* fall-through. */
13789         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13790                 /* fall-through. */
13791         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13792                 return hrxqs[1];
13793         case MLX5_RSS_HASH_IPV4_UDP:
13794                 /* fall-through. */
13795         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13796                 /* fall-through. */
13797         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13798                 return hrxqs[2];
13799         case MLX5_RSS_HASH_IPV6:
13800                 /* fall-through. */
13801         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13802                 /* fall-through. */
13803         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13804                 return hrxqs[3];
13805         case MLX5_RSS_HASH_IPV6_TCP:
13806                 /* fall-through. */
13807         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13808                 /* fall-through. */
13809         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13810                 return hrxqs[4];
13811         case MLX5_RSS_HASH_IPV6_UDP:
13812                 /* fall-through. */
13813         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13814                 /* fall-through. */
13815         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13816                 return hrxqs[5];
13817         case MLX5_RSS_HASH_NONE:
13818                 return hrxqs[6];
13819         default:
13820                 return 0;
13821         }
13823 }
13824
13825 /**
13826  * Apply the flow to the NIC, lock free
13827  * (the mutex should be acquired by the caller).
13828  *
13829  * @param[in] dev
13830  *   Pointer to the Ethernet device structure.
13831  * @param[in, out] flow
13832  *   Pointer to flow structure.
13833  * @param[out] error
13834  *   Pointer to error structure.
13835  *
13836  * @return
13837  *   0 on success, a negative errno value otherwise and rte_errno is set.
13838  */
13839 static int
13840 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13841               struct rte_flow_error *error)
13842 {
13843         struct mlx5_flow_dv_workspace *dv;
13844         struct mlx5_flow_handle *dh;
13845         struct mlx5_flow_handle_dv *dv_h;
13846         struct mlx5_flow *dev_flow;
13847         struct mlx5_priv *priv = dev->data->dev_private;
13848         uint32_t handle_idx;
13849         int n;
13850         int err;
13851         int idx;
13852         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13853         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13854         uint8_t misc_mask;
13855
13856         MLX5_ASSERT(wks);
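              /*
               * Walk all dev_flows in this thread's workspace, append the
               * fate action resolved at translation time (drop, queue,
               * shared RSS or default miss) and create the HW flow.
               */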
13857         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13858                 dev_flow = &wks->flows[idx];
13859                 dv = &dev_flow->dv;
13860                 dh = dev_flow->handle;
13861                 dv_h = &dh->dvh;
13862                 n = dv->actions_n;
13863                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13864                         if (dv->transfer) {
13865                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13866                                 dv->actions[n++] = priv->sh->dr_drop_action;
13867                         } else {
13868 #ifdef HAVE_MLX5DV_DR
13869                                 /* DR supports drop action placeholder. */
13870                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13871                                 dv->actions[n++] = dv->group ?
13872                                         priv->sh->dr_drop_action :
13873                                         priv->root_drop_action;
13874 #else
13875                                 /* For DV we use the explicit drop queue. */
13876                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13877                                 dv->actions[n++] =
13878                                                 priv->drop_queue.hrxq->action;
13879 #endif
13880                         }
13881                 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13882                            !dv_h->rix_sample && !dv_h->rix_dest_array) {
13883                         struct mlx5_hrxq *hrxq;
13884                         uint32_t hrxq_idx;
13885
13886                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13887                                                     &hrxq_idx);
13888                         if (!hrxq) {
13889                                 rte_flow_error_set
13890                                         (error, rte_errno,
13891                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13892                                          "cannot get hash queue");
13893                                 goto error;
13894                         }
13895                         dh->rix_hrxq = hrxq_idx;
13896                         dv->actions[n++] = hrxq->action;
13897                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13898                         struct mlx5_hrxq *hrxq = NULL;
13899                         uint32_t hrxq_idx;
13900
13901                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13902                                                 rss_desc->shared_rss,
13903                                                 dev_flow->hash_fields);
13904                         if (hrxq_idx)
13905                                 hrxq = mlx5_ipool_get
13906                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13907                                          hrxq_idx);
13908                         if (!hrxq) {
13909                                 rte_flow_error_set
13910                                         (error, rte_errno,
13911                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13912                                          "cannot get hash queue");
13913                                 goto error;
13914                         }
13915                         dh->rix_srss = rss_desc->shared_rss;
13916                         dv->actions[n++] = hrxq->action;
13917                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13918                         if (!priv->sh->default_miss_action) {
13919                                 rte_flow_error_set
13920                                         (error, rte_errno,
13921                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13922                                          "default miss action not created.");
13923                                 goto error;
13924                         }
13925                         dv->actions[n++] = priv->sh->default_miss_action;
13926                 }
13927                 misc_mask = flow_dv_matcher_enable(dv->value.buf);
13928                 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
13929                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13930                                                (void *)&dv->value, n,
13931                                                dv->actions, &dh->drv_flow);
13932                 if (err) {
13933                         rte_flow_error_set
13934                                 (error, errno,
13935                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13936                                 NULL,
13937                                 (!priv->config.allow_duplicate_pattern &&
13938                                 errno == EEXIST) ?
13939                                 "duplicating pattern is not allowed" :
13940                                 "hardware refuses to create flow");
13941                         goto error;
13942                 }
13943                 if (priv->vmwa_context &&
13944                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
13945                         /*
13946                          * The rule contains the VLAN pattern.
13947                          * For VF we are going to create a VLAN
13948                          * interface to make the hypervisor set the
13949                          * correct e-Switch vport context.
13950                          */
13951                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
13952                 }
13953         }
13954         return 0;
13955 error:
13956         err = rte_errno; /* Save rte_errno before cleanup. */
13957         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
13958                        handle_idx, dh, next) {
13959                 /* hrxq is a union; don't clear it if the flag is not set. */
13960                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
13961                         mlx5_hrxq_release(dev, dh->rix_hrxq);
13962                         dh->rix_hrxq = 0;
13963                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13964                         dh->rix_srss = 0;
13965                 }
13966                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13967                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13968         }
13969         rte_errno = err; /* Restore rte_errno. */
13970         return -rte_errno;
13971 }
13972
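/*
 * Usage sketch (hypothetical caller, not part of the driver):
 * flow_dv_apply() is the "commit to hardware" step and rolls back its
 * own acquisitions (hrxq references, VF VLAN) on failure, so a caller
 * only has to tear down the flow object itself:
 *
 *   if (flow_dv_apply(dev, flow, &error)) {
 *       flow_dv_destroy(dev, flow);
 *       return -rte_errno;
 *   }
 */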
13973 void
13974 flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
13975                           struct mlx5_list_entry *entry)
13976 {
13977         struct mlx5_flow_dv_matcher *resource = container_of(entry,
13978                                                              typeof(*resource),
13979                                                              entry);
13980
13981         claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
13982         mlx5_free(resource);
13983 }
13984
13985 /**
13986  * Release the flow matcher.
13987  *
13988  * @param dev
13989  *   Pointer to Ethernet device.
13990  * @param handle
13991  *   Pointer to mlx5_flow_handle.
13992  *
13993  * @return
13994  *   1 while a reference on it exists, 0 when freed.
13995  */
13996 static int
13997 flow_dv_matcher_release(struct rte_eth_dev *dev,
13998                         struct mlx5_flow_handle *handle)
13999 {
14000         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
14001         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
14002                                                             typeof(*tbl), tbl);
14003         int ret;
14004
14005         MLX5_ASSERT(matcher->matcher_object);
14006         ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
14007         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
14008         return ret;
14009 }
14010
14011 void
14012 flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14013 {
14014         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14015         struct mlx5_flow_dv_encap_decap_resource *res =
14016                                        container_of(entry, typeof(*res), entry);
14017
14018         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14019         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
14020 }
14021
14022 /**
14023  * Release an encap/decap resource.
14024  *
14025  * @param dev
14026  *   Pointer to Ethernet device.
14027  * @param encap_decap_idx
14028  *   Index of the encap/decap resource.
14029  *
14030  * @return
14031  *   1 while a reference on it exists, 0 when freed.
14032  */
14033 static int
14034 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
14035                                      uint32_t encap_decap_idx)
14036 {
14037         struct mlx5_priv *priv = dev->data->dev_private;
14038         struct mlx5_flow_dv_encap_decap_resource *resource;
14039
14040         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
14041                                   encap_decap_idx);
14042         if (!resource)
14043                 return 0;
14044         MLX5_ASSERT(resource->action);
14045         return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
14046 }
14047
14048 /**
14049  * Release a jump-to-table action resource.
14050  *
14051  * @param dev
14052  *   Pointer to Ethernet device.
14053  * @param rix_jump
14054  *   Index to the jump action resource.
14055  *
14056  * @return
14057  *   1 while a reference on it exists, 0 when freed.
14058  */
14059 static int
14060 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
14061                                   uint32_t rix_jump)
14062 {
14063         struct mlx5_priv *priv = dev->data->dev_private;
14064         struct mlx5_flow_tbl_data_entry *tbl_data;
14065
14066         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
14067                                   rix_jump);
14068         if (!tbl_data)
14069                 return 0;
14070         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
14071 }
14072
14073 void
14074 flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14075 {
14076         struct mlx5_flow_dv_modify_hdr_resource *res =
14077                 container_of(entry, typeof(*res), entry);
14078         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14079
14080         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14081         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
14082 }
14083
14084 /**
14085  * Release a modify-header resource.
14086  *
14087  * @param dev
14088  *   Pointer to Ethernet device.
14089  * @param handle
14090  *   Pointer to mlx5_flow_handle.
14091  *
14092  * @return
14093  *   1 while a reference on it exists, 0 when freed.
14094  */
14095 static int
14096 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
14097                                     struct mlx5_flow_handle *handle)
14098 {
14099         struct mlx5_priv *priv = dev->data->dev_private;
14100         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
14101
14102         MLX5_ASSERT(entry->action);
14103         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
14104 }
14105
14106 void
14107 flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14108 {
14109         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14110         struct mlx5_flow_dv_port_id_action_resource *resource =
14111                                   container_of(entry, typeof(*resource), entry);
14112
14113         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14114         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
14115 }
14116
14117 /**
14118  * Release port ID action resource.
14119  *
14120  * @param dev
14121  *   Pointer to Ethernet device.
14122  * @param handle
14123  *   Pointer to mlx5_flow_handle.
14124  *
14125  * @return
14126  *   1 while a reference on it exists, 0 when freed.
14127  */
14128 static int
14129 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
14130                                         uint32_t port_id)
14131 {
14132         struct mlx5_priv *priv = dev->data->dev_private;
14133         struct mlx5_flow_dv_port_id_action_resource *resource;
14134
14135         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
14136         if (!resource)
14137                 return 0;
14138         MLX5_ASSERT(resource->action);
14139         return mlx5_list_unregister(priv->sh->port_id_action_list,
14140                                     &resource->entry);
14141 }
14142
14143 /**
14144  * Release shared RSS action resource.
14145  *
14146  * @param dev
14147  *   Pointer to Ethernet device.
14148  * @param srss
14149  *   Shared RSS action index.
14150  */
14151 static void
14152 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
14153 {
14154         struct mlx5_priv *priv = dev->data->dev_private;
14155         struct mlx5_shared_action_rss *shared_rss;
14156
14157         shared_rss = mlx5_ipool_get
14158                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
14159         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14160 }
14161
14162 void
14163 flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14164 {
14165         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14166         struct mlx5_flow_dv_push_vlan_action_resource *resource =
14167                         container_of(entry, typeof(*resource), entry);
14168
14169         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14170         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
14171 }
14172
14173 /**
14174  * Release push vlan action resource.
14175  *
14176  * @param dev
14177  *   Pointer to Ethernet device.
14178  * @param handle
14179  *   Pointer to mlx5_flow_handle.
14180  *
14181  * @return
14182  *   1 while a reference on it exists, 0 when freed.
14183  */
14184 static int
14185 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
14186                                           struct mlx5_flow_handle *handle)
14187 {
14188         struct mlx5_priv *priv = dev->data->dev_private;
14189         struct mlx5_flow_dv_push_vlan_action_resource *resource;
14190         uint32_t idx = handle->dvh.rix_push_vlan;
14191
14192         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
14193         if (!resource)
14194                 return 0;
14195         MLX5_ASSERT(resource->action);
14196         return mlx5_list_unregister(priv->sh->push_vlan_action_list,
14197                                     &resource->entry);
14198 }
14199
14200 /**
14201  * Release the fate resource.
14202  *
14203  * @param dev
14204  *   Pointer to Ethernet device.
14205  * @param handle
14206  *   Pointer to mlx5_flow_handle.
14207  */
14208 static void
14209 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
14210                                struct mlx5_flow_handle *handle)
14211 {
14212         if (!handle->rix_fate)
14213                 return;
14214         switch (handle->fate_action) {
14215         case MLX5_FLOW_FATE_QUEUE:
14216                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
14217                         mlx5_hrxq_release(dev, handle->rix_hrxq);
14218                 break;
14219         case MLX5_FLOW_FATE_JUMP:
14220                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
14221                 break;
14222         case MLX5_FLOW_FATE_PORT_ID:
14223                 flow_dv_port_id_action_resource_release(dev,
14224                                 handle->rix_port_id_action);
14225                 break;
14226         default:
14227                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
14228                 break;
14229         }
14230         handle->rix_fate = 0;
14231 }
14232
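/*
 * Note with an illustrative snippet (not part of the driver): rix_fate
 * aliases rix_hrxq, rix_jump and rix_port_id_action in a union inside
 * the handle, which is why the routine above must dispatch on
 * fate_action before interpreting the index:
 *
 *   dh->fate_action = MLX5_FLOW_FATE_JUMP;
 *   dh->rix_jump = jump_idx;  // same storage as rix_hrxq
 *   flow_dv_fate_resource_release(dev, dh);  // releases the jump table
 *
 * jump_idx stands for a valid jump table resource index (hypothetical).
 */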
14233 void
14234 flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
14235                          struct mlx5_list_entry *entry)
14236 {
14237         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
14238                                                               typeof(*resource),
14239                                                               entry);
14240         struct rte_eth_dev *dev = resource->dev;
14241         struct mlx5_priv *priv = dev->data->dev_private;
14242
14243         if (resource->verbs_action)
14244                 claim_zero(mlx5_flow_os_destroy_flow_action
14245                                                       (resource->verbs_action));
14246         if (resource->normal_path_tbl)
14247                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14248                                              resource->normal_path_tbl);
14249         flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
14250         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
14251         DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
14252 }
14253
14254 /**
14255  * Release a sample resource.
14256  *
14257  * @param dev
14258  *   Pointer to Ethernet device.
14259  * @param handle
14260  *   Pointer to mlx5_flow_handle.
14261  *
14262  * @return
14263  *   1 while a reference on it exists, 0 when freed.
14264  */
14265 static int
14266 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
14267                                 struct mlx5_flow_handle *handle)
14268 {
14269         struct mlx5_priv *priv = dev->data->dev_private;
14270         struct mlx5_flow_dv_sample_resource *resource;
14271
14272         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
14273                                   handle->dvh.rix_sample);
14274         if (!resource)
14275                 return 0;
14276         MLX5_ASSERT(resource->verbs_action);
14277         return mlx5_list_unregister(priv->sh->sample_action_list,
14278                                     &resource->entry);
14279 }
14280
14281 void
14282 flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
14283                              struct mlx5_list_entry *entry)
14284 {
14285         struct mlx5_flow_dv_dest_array_resource *resource =
14286                         container_of(entry, typeof(*resource), entry);
14287         struct rte_eth_dev *dev = resource->dev;
14288         struct mlx5_priv *priv = dev->data->dev_private;
14289         uint32_t i = 0;
14290
14291         MLX5_ASSERT(resource->action);
14292         if (resource->action)
14293                 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14294         for (; i < resource->num_of_dest; i++)
14295                 flow_dv_sample_sub_actions_release(dev,
14296                                                    &resource->sample_idx[i]);
14297         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
14298         DRV_LOG(DEBUG, "destination array resource %p: removed",
14299                 (void *)resource);
14300 }
14301
14302 /**
14303  * Release a destination array resource.
14304  *
14305  * @param dev
14306  *   Pointer to Ethernet device.
14307  * @param handle
14308  *   Pointer to mlx5_flow_handle.
14309  *
14310  * @return
14311  *   1 while a reference on it exists, 0 when freed.
14312  */
14313 static int
14314 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14315                                     struct mlx5_flow_handle *handle)
14316 {
14317         struct mlx5_priv *priv = dev->data->dev_private;
14318         struct mlx5_flow_dv_dest_array_resource *resource;
14319
14320         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14321                                   handle->dvh.rix_dest_array);
14322         if (!resource)
14323                 return 0;
14324         MLX5_ASSERT(resource->action);
14325         return mlx5_list_unregister(priv->sh->dest_array_list,
14326                                     &resource->entry);
14327 }
14328
14329 static void
14330 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
14331 {
14332         struct mlx5_priv *priv = dev->data->dev_private;
14333         struct mlx5_dev_ctx_shared *sh = priv->sh;
14334         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
14335                                 sh->geneve_tlv_option_resource;
14336         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
14337         if (geneve_opt_resource) {
14338                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
14339                                          __ATOMIC_RELAXED))) {
14340                         claim_zero(mlx5_devx_cmd_destroy
14341                                         (geneve_opt_resource->obj));
14342                         mlx5_free(sh->geneve_tlv_option_resource);
14343                         sh->geneve_tlv_option_resource = NULL;
14344                 }
14345         }
14346         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
14347 }
14348
14349 /**
14350  * Remove the flow from the NIC but keep it in memory.
14351  * Lock free (mutex must be acquired by the caller).
14352  *
14353  * @param[in] dev
14354  *   Pointer to Ethernet device.
14355  * @param[in, out] flow
14356  *   Pointer to flow structure.
14357  */
14358 static void
14359 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14360 {
14361         struct mlx5_flow_handle *dh;
14362         uint32_t handle_idx;
14363         struct mlx5_priv *priv = dev->data->dev_private;
14364
14365         if (!flow)
14366                 return;
14367         handle_idx = flow->dev_handles;
14368         while (handle_idx) {
14369                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14370                                     handle_idx);
14371                 if (!dh)
14372                         return;
14373                 if (dh->drv_flow) {
14374                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14375                         dh->drv_flow = NULL;
14376                 }
14377                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14378                         flow_dv_fate_resource_release(dev, dh);
14379                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14380                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14381                 handle_idx = dh->next.next;
14382         }
14383 }
14384
14385 /**
14386  * Remove the flow from the NIC and from memory.
14387  * Lock free (mutex must be acquired by the caller).
14388  *
14389  * @param[in] dev
14390  *   Pointer to the Ethernet device structure.
14391  * @param[in, out] flow
14392  *   Pointer to flow structure.
14393  */
14394 static void
14395 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
14396 {
14397         struct mlx5_flow_handle *dev_handle;
14398         struct mlx5_priv *priv = dev->data->dev_private;
14399         struct mlx5_flow_meter_info *fm = NULL;
14400         uint32_t srss = 0;
14401
14402         if (!flow)
14403                 return;
14404         flow_dv_remove(dev, flow);
14405         if (flow->counter) {
14406                 flow_dv_counter_free(dev, flow->counter);
14407                 flow->counter = 0;
14408         }
14409         if (flow->meter) {
14410                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
14411                 if (fm)
14412                         mlx5_flow_meter_detach(priv, fm);
14413                 flow->meter = 0;
14414         }
14415         /* Keep the current age handling by default. */
14416         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
14417                 flow_dv_aso_ct_release(dev, flow->ct, NULL);
14418         else if (flow->age)
14419                 flow_dv_aso_age_release(dev, flow->age);
14420         if (flow->geneve_tlv_option) {
14421                 flow_dv_geneve_tlv_option_resource_release(dev);
14422                 flow->geneve_tlv_option = 0;
14423         }
14424         while (flow->dev_handles) {
14425                 uint32_t tmp_idx = flow->dev_handles;
14426
14427                 dev_handle = mlx5_ipool_get(priv->sh->ipool
14428                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
14429                 if (!dev_handle)
14430                         return;
14431                 flow->dev_handles = dev_handle->next.next;
14432                 while (dev_handle->flex_item) {
14433                         int index = rte_bsf32(dev_handle->flex_item);
14434
14435                         mlx5_flex_release_index(dev, index);
14436                         dev_handle->flex_item &= ~RTE_BIT32(index);
14437                 }
14438                 if (dev_handle->dvh.matcher)
14439                         flow_dv_matcher_release(dev, dev_handle);
14440                 if (dev_handle->dvh.rix_sample)
14441                         flow_dv_sample_resource_release(dev, dev_handle);
14442                 if (dev_handle->dvh.rix_dest_array)
14443                         flow_dv_dest_array_resource_release(dev, dev_handle);
14444                 if (dev_handle->dvh.rix_encap_decap)
14445                         flow_dv_encap_decap_resource_release(dev,
14446                                 dev_handle->dvh.rix_encap_decap);
14447                 if (dev_handle->dvh.modify_hdr)
14448                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
14449                 if (dev_handle->dvh.rix_push_vlan)
14450                         flow_dv_push_vlan_action_resource_release(dev,
14451                                                                   dev_handle);
14452                 if (dev_handle->dvh.rix_tag)
14453                         flow_dv_tag_release(dev,
14454                                             dev_handle->dvh.rix_tag);
14455                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
14456                         flow_dv_fate_resource_release(dev, dev_handle);
14457                 else if (!srss)
14458                         srss = dev_handle->rix_srss;
14459                 if (fm && dev_handle->is_meter_flow_id &&
14460                     dev_handle->split_flow_id)
14461                         mlx5_ipool_free(fm->flow_ipool,
14462                                         dev_handle->split_flow_id);
14463                 else if (dev_handle->split_flow_id &&
14464                     !dev_handle->is_meter_flow_id)
14465                         mlx5_ipool_free(priv->sh->ipool
14466                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
14467                                         dev_handle->split_flow_id);
14468                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14469                            tmp_idx);
14470         }
14471         if (srss)
14472                 flow_dv_shared_rss_action_release(dev, srss);
14473 }
14474
14475 /**
14476  * Release array of hash RX queue objects.
14477  * Helper function.
14478  *
14479  * @param[in] dev
14480  *   Pointer to the Ethernet device structure.
14481  * @param[in, out] hrxqs
14482  *   Array of hash RX queue objects.
14483  *
14484  * @return
14485  *   Total number of references to hash RX queue objects in *hrxqs* array
14486  *   after this operation.
14487  */
14488 static int
14489 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14490                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14491 {
14492         size_t i;
14493         int remaining = 0;
14494
14495         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14496                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14497
14498                 if (!ret)
14499                         (*hrxqs)[i] = 0;
14500                 remaining += ret;
14501         }
14502         return remaining;
14503 }
14504
14505 /**
14506  * Release all hash RX queue objects representing shared RSS action.
14507  *
14508  * @param[in] dev
14509  *   Pointer to the Ethernet device structure.
14510  * @param[in, out] action
14511  *   Shared RSS action to remove hash RX queue objects from.
14512  *
14513  * @return
14514  *   Total number of references to hash RX queue objects stored in *action*
14515  *   after this operation.
14516  *   Expected to be 0 if no external references are held.
14517  */
14518 static int
14519 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
14520                                  struct mlx5_shared_action_rss *shared_rss)
14521 {
14522         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
14523 }
14524
14525 /**
14526  * Adjust the L3/L4 hash value of a pre-created shared RSS hrxq
14527  * according to user input.
14528  *
14529  * Only one hash value is available for each L3+L4 combination.
14530  * For example:
14531  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
14532  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
14533  * the same slot in mlx5_rss_hash_fields.
14534  *
14535  * @param[in] rss
14536  *   Pointer to the shared action RSS conf.
14537  * @param[in, out] hash_field
14538  *   Hash field value to be adjusted.
14539  *
14540  * @return
14541  *   void
14542  */
14543 static void
14544 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
14545                                      uint64_t *hash_field)
14546 {
14547         uint64_t rss_types = rss->origin.types;
14548
14549         switch (*hash_field & ~IBV_RX_HASH_INNER) {
14550         case MLX5_RSS_HASH_IPV4:
14551                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
14552                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
14553                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14554                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
14555                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14556                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
14557                         else
14558                                 *hash_field |= MLX5_RSS_HASH_IPV4;
14559                 }
14560                 return;
14561         case MLX5_RSS_HASH_IPV6:
14562                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
14563                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
14564                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14565                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
14566                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14567                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
14568                         else
14569                                 *hash_field |= MLX5_RSS_HASH_IPV6;
14570                 }
14571                 return;
14572         case MLX5_RSS_HASH_IPV4_UDP:
14573                 /* fall-through. */
14574         case MLX5_RSS_HASH_IPV6_UDP:
14575                 if (rss_types & RTE_ETH_RSS_UDP) {
14576                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
14577                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14578                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
14579                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14580                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
14581                         else
14582                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14583                 }
14584                 return;
14585         case MLX5_RSS_HASH_IPV4_TCP:
14586                 /* fall-through. */
14587         case MLX5_RSS_HASH_IPV6_TCP:
14588                 if (rss_types & RTE_ETH_RSS_TCP) {
14589                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14590                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14591                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14592                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14593                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14594                         else
14595                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14596                 }
14597                 return;
14598         default:
14599                 return;
14600         }
14601 }
14602
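/*
 * Worked example (illustrative, assuming origin.types ==
 * (RTE_ETH_RSS_IP | RTE_ETH_RSS_L3_DST_ONLY)): the IPv4 slot value is
 * narrowed from the full src+dst hash to the destination half only:
 *
 *   uint64_t hf = MLX5_RSS_HASH_IPV4;  // SRC_IPV4 | DST_IPV4
 *
 *   __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hf);
 *   MLX5_ASSERT(hf == IBV_RX_HASH_DST_IPV4);
 */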
14603 /**
14604  * Set up the shared RSS action.
14605  * Prepare a set of hash RX queue objects sufficient to handle all valid
14606  * hash_fields combinations (see enum ibv_rx_hash_fields).
14607  *
14608  * @param[in] dev
14609  *   Pointer to the Ethernet device structure.
14610  * @param[in] action_idx
14611  *   Shared RSS action ipool index.
14612  * @param[in, out] action
14613  *   Partially initialized shared RSS action.
14614  * @param[out] error
14615  *   Perform verbose error reporting if not NULL. Initialized in case of
14616  *   error only.
14617  *
14618  * @return
14619  *   0 on success, otherwise negative errno value.
14620  */
14621 static int
14622 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14623                            uint32_t action_idx,
14624                            struct mlx5_shared_action_rss *shared_rss,
14625                            struct rte_flow_error *error)
14626 {
14627         struct mlx5_flow_rss_desc rss_desc = { 0 };
14628         size_t i;
14629         int err;
14630
14631         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
14632                 return rte_flow_error_set(error, rte_errno,
14633                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14634                                           "cannot setup indirection table");
14635         }
14636         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14637         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14638         rss_desc.const_q = shared_rss->origin.queue;
14639         rss_desc.queue_num = shared_rss->origin.queue_num;
14640         /* Set non-zero value to indicate a shared RSS. */
14641         rss_desc.shared_rss = action_idx;
14642         rss_desc.ind_tbl = shared_rss->ind_tbl;
14643         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14644                 uint32_t hrxq_idx;
14645                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14646                 int tunnel = 0;
14647
14648                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14649                 if (shared_rss->origin.level > 1) {
14650                         hash_fields |= IBV_RX_HASH_INNER;
14651                         tunnel = 1;
14652                 }
14653                 rss_desc.tunnel = tunnel;
14654                 rss_desc.hash_fields = hash_fields;
14655                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14656                 if (!hrxq_idx) {
14657                         rte_flow_error_set
14658                                 (error, rte_errno,
14659                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14660                                  "cannot get hash queue");
14661                         goto error_hrxq_new;
14662                 }
14663                 err = __flow_dv_action_rss_hrxq_set
14664                         (shared_rss, hash_fields, hrxq_idx);
14665                 MLX5_ASSERT(!err);
14666         }
14667         return 0;
14668 error_hrxq_new:
14669         err = rte_errno;
14670         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14671         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
14672                 shared_rss->ind_tbl = NULL;
14673         rte_errno = err;
14674         return -rte_errno;
14675 }
14676
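/*
 * Illustrative sketch (not part of the driver): after a successful
 * setup, every entry of mlx5_rss_hash_fields has a matching hrxq that
 * the lookup helper can retrieve by (adjusted) hash-fields value:
 *
 *   uint32_t hrxq_idx = __flow_dv_action_rss_hrxq_lookup
 *                           (dev, action_idx, MLX5_RSS_HASH_IPV4_UDP);
 *
 *   MLX5_ASSERT(hrxq_idx != 0);
 */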
14677 /**
14678  * Create shared RSS action.
14679  *
14680  * @param[in] dev
14681  *   Pointer to the Ethernet device structure.
14682  * @param[in] conf
14683  *   Shared action configuration.
14684  * @param[in] rss
14685  *   RSS action specification used to create shared action.
14686  * @param[out] error
14687  *   Perform verbose error reporting if not NULL. Initialized in case of
14688  *   error only.
14689  *
14690  * @return
14691  *   A valid shared action ID in case of success, 0 otherwise and
14692  *   rte_errno is set.
14693  */
14694 static uint32_t
14695 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14696                             const struct rte_flow_indir_action_conf *conf,
14697                             const struct rte_flow_action_rss *rss,
14698                             struct rte_flow_error *error)
14699 {
14700         struct mlx5_priv *priv = dev->data->dev_private;
14701         struct mlx5_shared_action_rss *shared_rss = NULL;
14702         void *queue = NULL;
14703         struct rte_flow_action_rss *origin;
14704         const uint8_t *rss_key;
14705         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14706         uint32_t idx;
14707
14708         RTE_SET_USED(conf);
14709         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14710                             0, SOCKET_ID_ANY);
14711         shared_rss = mlx5_ipool_zmalloc
14712                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14713         if (!shared_rss || !queue) {
14714                 rte_flow_error_set(error, ENOMEM,
14715                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14716                                    "cannot allocate resource memory");
14717                 goto error_rss_init;
14718         }
14719         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14720                 rte_flow_error_set(error, E2BIG,
14721                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14722                                    "rss action number out of range");
14723                 goto error_rss_init;
14724         }
14725         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14726                                           sizeof(*shared_rss->ind_tbl),
14727                                           0, SOCKET_ID_ANY);
14728         if (!shared_rss->ind_tbl) {
14729                 rte_flow_error_set(error, ENOMEM,
14730                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14731                                    "cannot allocate resource memory");
14732                 goto error_rss_init;
14733         }
14734         memcpy(queue, rss->queue, queue_size);
14735         shared_rss->ind_tbl->queues = queue;
14736         shared_rss->ind_tbl->queues_n = rss->queue_num;
14737         origin = &shared_rss->origin;
14738         origin->func = rss->func;
14739         origin->level = rss->level;
14740         /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
14741         origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
14742         /* NULL RSS key indicates default RSS key. */
14743         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14744         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14745         origin->key = &shared_rss->key[0];
14746         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14747         origin->queue = queue;
14748         origin->queue_num = rss->queue_num;
14749         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14750                 goto error_rss_init;
14751         rte_spinlock_init(&shared_rss->action_rss_sl);
14752         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14753         rte_spinlock_lock(&priv->shared_act_sl);
14754         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14755                      &priv->rss_shared_actions, idx, shared_rss, next);
14756         rte_spinlock_unlock(&priv->shared_act_sl);
14757         return idx;
14758 error_rss_init:
14759         if (shared_rss) {
14760                 if (shared_rss->ind_tbl)
14761                         mlx5_free(shared_rss->ind_tbl);
14762                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14763                                 idx);
14764         }
14765         if (queue)
14766                 mlx5_free(queue);
14767         return 0;
14768 }
14769
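/*
 * Usage sketch via the public rte_flow API (illustrative; this driver
 * path is reached through rte_flow_action_handle_create(), and the
 * queue numbers below are hypothetical):
 *
 *   uint16_t queues[2] = { 0, 1 };
 *   struct rte_flow_action_rss rss = {
 *       .types = RTE_ETH_RSS_IP,
 *       .key = NULL,           // default RSS key
 *       .queue = queues,
 *       .queue_num = 2,
 *   };
 *   struct rte_flow_action action = {
 *       .type = RTE_FLOW_ACTION_TYPE_RSS,
 *       .conf = &rss,
 *   };
 *   struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *   struct rte_flow_error error;
 *   struct rte_flow_action_handle *h =
 *       rte_flow_action_handle_create(port_id, &conf, &action, &error);
 */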
14770 /**
14771  * Destroy the shared RSS action.
14772  * Release related hash RX queue objects.
14773  *
14774  * @param[in] dev
14775  *   Pointer to the Ethernet device structure.
14776  * @param[in] idx
14777  *   The shared RSS action object ID to be removed.
14778  * @param[out] error
14779  *   Perform verbose error reporting if not NULL. Initialized in case of
14780  *   error only.
14781  *
14782  * @return
14783  *   0 on success, otherwise negative errno value.
14784  */
14785 static int
14786 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14787                              struct rte_flow_error *error)
14788 {
14789         struct mlx5_priv *priv = dev->data->dev_private;
14790         struct mlx5_shared_action_rss *shared_rss =
14791             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14792         uint32_t old_refcnt = 1;
14793         int remaining;
14794         uint16_t *queue = NULL;
14795
14796         if (!shared_rss)
14797                 return rte_flow_error_set(error, EINVAL,
14798                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14799                                           "invalid shared action");
14800         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14801                                          0, 0, __ATOMIC_ACQUIRE,
14802                                          __ATOMIC_RELAXED))
14803                 return rte_flow_error_set(error, EBUSY,
14804                                           RTE_FLOW_ERROR_TYPE_ACTION,
14805                                           NULL,
14806                                           "shared rss has references");
14807         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14808         if (remaining)
14809                 return rte_flow_error_set(error, EBUSY,
14810                                           RTE_FLOW_ERROR_TYPE_ACTION,
14811                                           NULL,
14812                                           "shared rss hrxq has references");
14813         queue = shared_rss->ind_tbl->queues;
14814         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
14815         if (remaining)
14816                 return rte_flow_error_set(error, EBUSY,
14817                                           RTE_FLOW_ERROR_TYPE_ACTION,
14818                                           NULL,
14819                                           "shared rss indirection table has"
14820                                           " references");
14821         mlx5_free(queue);
14822         rte_spinlock_lock(&priv->shared_act_sl);
14823         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14824                      &priv->rss_shared_actions, idx, shared_rss, next);
14825         rte_spinlock_unlock(&priv->shared_act_sl);
14826         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14827                         idx);
14828         return 0;
14829 }
14830
14831 /**
14832  * Create an indirect action, lock free
14833  * (mutex must be acquired by the caller).
14834  * Dispatcher for the action-type-specific call.
14835  *
14836  * @param[in] dev
14837  *   Pointer to the Ethernet device structure.
14838  * @param[in] conf
14839  *   Shared action configuration.
14840  * @param[in] action
14841  *   Action specification used to create indirect action.
14842  * @param[out] error
14843  *   Perform verbose error reporting if not NULL. Initialized in case of
14844  *   error only.
14845  *
14846  * @return
14847  *   A valid shared action handle in case of success, NULL otherwise and
14848  *   rte_errno is set.
14849  */
14850 static struct rte_flow_action_handle *
14851 flow_dv_action_create(struct rte_eth_dev *dev,
14852                       const struct rte_flow_indir_action_conf *conf,
14853                       const struct rte_flow_action *action,
14854                       struct rte_flow_error *err)
14855 {
14856         struct mlx5_priv *priv = dev->data->dev_private;
14857         uint32_t age_idx = 0;
14858         uint32_t idx = 0;
14859         uint32_t ret = 0;
14860
14861         switch (action->type) {
14862         case RTE_FLOW_ACTION_TYPE_RSS:
14863                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14864                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14865                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14866                 break;
14867         case RTE_FLOW_ACTION_TYPE_AGE:
14868                 age_idx = flow_dv_aso_age_alloc(dev, err);
14869                 if (!age_idx) {
14870                         ret = -rte_errno;
14871                         break;
14872                 }
14873                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14874                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14875                 flow_dv_aso_age_params_init(dev, age_idx,
14876                                         ((const struct rte_flow_action_age *)
14877                                                 action->conf)->context ?
14878                                         ((const struct rte_flow_action_age *)
14879                                                 action->conf)->context :
14880                                         (void *)(uintptr_t)idx,
14881                                         ((const struct rte_flow_action_age *)
14882                                                 action->conf)->timeout);
14883                 ret = age_idx;
14884                 break;
14885         case RTE_FLOW_ACTION_TYPE_COUNT:
14886                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14887                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14888                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14889                 break;
14890         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14891                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14892                                                          err);
14893                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14894                 break;
14895         default:
14896                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14897                                    NULL, "action type not supported");
14898                 break;
14899         }
14900         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14901 }
14902
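/*
 * Illustrative sketch (not part of the driver): the returned
 * rte_flow_action_handle is not a pointer but a packed 32-bit index,
 * with the action type in the bits above
 * MLX5_INDIRECT_ACTION_TYPE_OFFSET (conntrack additionally packs the
 * owner port ID). Decoding mirrors flow_dv_action_destroy() below:
 *
 *   uint32_t act_idx = (uint32_t)(uintptr_t)handle;
 *   uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 *   uint32_t idx = act_idx &
 *                  ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
 */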
14903 /**
14904  * Destroy the indirect action.
14905  * Release action-related resources on the NIC and in memory.
14906  * Lock free (mutex must be acquired by the caller).
14907  * Dispatcher for the action-type-specific call.
14908  *
14909  * @param[in] dev
14910  *   Pointer to the Ethernet device structure.
14911  * @param[in] handle
14912  *   The indirect action object handle to be removed.
14913  * @param[out] error
14914  *   Perform verbose error reporting if not NULL. Initialized in case of
14915  *   error only.
14916  *
14917  * @return
14918  *   0 on success, otherwise negative errno value.
14919  */
14920 static int
14921 flow_dv_action_destroy(struct rte_eth_dev *dev,
14922                        struct rte_flow_action_handle *handle,
14923                        struct rte_flow_error *error)
14924 {
14925         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14926         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14927         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14928         struct mlx5_flow_counter *cnt;
14929         uint32_t no_flow_refcnt = 1;
14930         int ret;
14931
14932         switch (type) {
14933         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14934                 return __flow_dv_action_rss_release(dev, idx, error);
14935         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
14936                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
14937                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
14938                                                  &no_flow_refcnt, 1, false,
14939                                                  __ATOMIC_ACQUIRE,
14940                                                  __ATOMIC_RELAXED))
14941                         return rte_flow_error_set(error, EBUSY,
14942                                                   RTE_FLOW_ERROR_TYPE_ACTION,
14943                                                   NULL,
14944                                                   "Indirect count action has references");
14945                 flow_dv_counter_free(dev, idx);
14946                 return 0;
14947         case MLX5_INDIRECT_ACTION_TYPE_AGE:
14948                 ret = flow_dv_aso_age_release(dev, idx);
14949                 if (ret)
14950                         /*
14951                          * In this case, the last flow holding a reference
14952                          * will actually release the age action.
14953                          */
14954                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
14955                                 " released with references %d.", idx, ret);
14956                 return 0;
14957         case MLX5_INDIRECT_ACTION_TYPE_CT:
14958                 ret = flow_dv_aso_ct_release(dev, idx, error);
14959                 if (ret < 0)
14960                         return ret;
14961                 if (ret > 0)
14962                         DRV_LOG(DEBUG, "Connection tracking object %u still "
14963                                 "has references %d.", idx, ret);
14964                 return 0;
14965         default:
14966                 return rte_flow_error_set(error, ENOTSUP,
14967                                           RTE_FLOW_ERROR_TYPE_ACTION,
14968                                           NULL,
14969                                           "action type not supported");
14970         }
14971 }
14972
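/*
 * Usage sketch (public API, illustrative): destruction also goes
 * through the generic handle, and fails with EBUSY while flow rules
 * still reference the action:
 *
 *   struct rte_flow_error error;
 *
 *   if (rte_flow_action_handle_destroy(port_id, h, &error))
 *       printf("destroy failed: %s\n", error.message);
 */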
14973 /**
14974  * Update the shared RSS action configuration in place.
14975  *
14976  * @param[in] dev
14977  *   Pointer to the Ethernet device structure.
14978  * @param[in] idx
14979  *   The shared RSS action object ID to be updated.
14980  * @param[in] action_conf
14981  *   RSS action specification used to modify *shared_rss*.
14982  * @param[out] error
14983  *   Perform verbose error reporting if not NULL. Initialized in case of
14984  *   error only.
14985  *
14986  * @return
14987  *   0 on success, otherwise negative errno value.
14988  * @note Currently only the update of RSS queues is supported.
14989  */
14990 static int
14991 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
14992                             const struct rte_flow_action_rss *action_conf,
14993                             struct rte_flow_error *error)
14994 {
14995         struct mlx5_priv *priv = dev->data->dev_private;
14996         struct mlx5_shared_action_rss *shared_rss =
14997             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14998         int ret = 0;
14999         void *queue = NULL;
15000         uint16_t *queue_old = NULL;
15001         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
15002
15003         if (!shared_rss)
15004                 return rte_flow_error_set(error, EINVAL,
15005                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15006                                           "invalid shared action to update");
15007         if (priv->obj_ops.ind_table_modify == NULL)
15008                 return rte_flow_error_set(error, ENOTSUP,
15009                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15010                                           "cannot modify indirection table");
15011         queue = mlx5_malloc(MLX5_MEM_ZERO,
15012                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
15013                             0, SOCKET_ID_ANY);
15014         if (!queue)
15015                 return rte_flow_error_set(error, ENOMEM,
15016                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15017                                           NULL,
15018                                           "cannot allocate resource memory");
15019         memcpy(queue, action_conf->queue, queue_size);
15020         MLX5_ASSERT(shared_rss->ind_tbl);
15021         rte_spinlock_lock(&shared_rss->action_rss_sl);
15022         queue_old = shared_rss->ind_tbl->queues;
15023         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
15024                                         queue, action_conf->queue_num, true);
15025         if (ret) {
15026                 mlx5_free(queue);
15027                 ret = rte_flow_error_set(error, rte_errno,
15028                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15029                                           "cannot update indirection table");
15030         } else {
15031                 mlx5_free(queue_old);
15032                 shared_rss->origin.queue = queue;
15033                 shared_rss->origin.queue_num = action_conf->queue_num;
15034         }
15035         rte_spinlock_unlock(&shared_rss->action_rss_sl);
15036         return ret;
15037 }
15038
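/*
 * Usage sketch (public API, illustrative): only the queue set of a
 * shared RSS action can be updated; the update payload is a whole
 * rte_flow_action wrapping the new RSS configuration:
 *
 *   uint16_t new_queues[4] = { 0, 1, 2, 3 };
 *   struct rte_flow_action_rss rss_update = {
 *       .queue = new_queues,
 *       .queue_num = 4,
 *   };
 *   struct rte_flow_action update = {
 *       .type = RTE_FLOW_ACTION_TYPE_RSS,
 *       .conf = &rss_update,
 *   };
 *
 *   rte_flow_action_handle_update(port_id, handle, &update, &error);
 */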
15039 /**
15040  * Update the conntrack context or direction in place.
15041  * Context update should be synchronized.
15042  *
15043  * @param[in] dev
15044  *   Pointer to the Ethernet device structure.
15045  * @param[in] idx
15046  *   The conntrack object ID to be updated.
15047  * @param[in] update
15048  *   Pointer to the structure of information to update.
15049  * @param[out] error
15050  *   Perform verbose error reporting if not NULL. Initialized in case of
15051  *   error only.
15052  *
15053  * @return
15054  *   0 on success, otherwise negative errno value.
15055  */
15056 static int
15057 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
15058                            const struct rte_flow_modify_conntrack *update,
15059                            struct rte_flow_error *error)
15060 {
15061         struct mlx5_priv *priv = dev->data->dev_private;
15062         struct mlx5_aso_ct_action *ct;
15063         const struct rte_flow_action_conntrack *new_prf;
15064         int ret = 0;
15065         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15066         uint32_t dev_idx;
15067
15068         if (PORT_ID(priv) != owner)
15069                 return rte_flow_error_set(error, EACCES,
15070                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15071                                           NULL,
15072                                           "CT object owned by another port");
15073         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15074         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15075         if (!ct->refcnt)
15076                 return rte_flow_error_set(error, ENOMEM,
15077                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15078                                           NULL,
15079                                           "CT object is inactive");
15080         new_prf = &update->new_ct;
15081         if (update->direction)
15082                 ct->is_original = !!new_prf->is_original_dir;
15083         if (update->state) {
15084                 /* Only validate the profile when it needs to be updated. */
15085                 ret = mlx5_validate_action_ct(dev, new_prf, error);
15086                 if (ret)
15087                         return ret;
15088                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
15089                 if (ret)
15090                         return rte_flow_error_set(error, EIO,
15091                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15092                                         NULL,
15093                                         "Failed to send CT context update WQE");
15094                 /* Block until ready or a failure. */
15095                 ret = mlx5_aso_ct_available(priv->sh, ct);
15096                 if (ret)
15097                         rte_flow_error_set(error, rte_errno,
15098                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15099                                            NULL,
15100                                            "Timed out waiting for the CT update");
15101         }
15102         return ret;
15103 }
15104
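/*
 * Usage sketch (illustrative): the update payload for a conntrack
 * handle is struct rte_flow_modify_conntrack; per the logic above,
 * setting only the direction bit flips the direction without a WQE
 * round-trip, while the state bit triggers a full profile update:
 *
 *   struct rte_flow_modify_conntrack mod = {
 *       .new_ct = { .is_original_dir = 0 },
 *       .direction = 1,
 *       .state = 0,
 *   };
 *
 *   rte_flow_action_handle_update(port_id, ct_handle, &mod, &error);
 */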
15105 /**
15106  * Update the shared action configuration in place, lock free
15107  * (mutex must be acquired by the caller).
15108  *
15109  * @param[in] dev
15110  *   Pointer to the Ethernet device structure.
15111  * @param[in] handle
15112  *   The indirect action object handle to be updated.
15113  * @param[in] update
15114  *   Action specification used to modify the action pointed by *handle*.
15115  *   *update* could be of same type with the action pointed by the *handle*
15116  *   handle argument, or some other structures like a wrapper, depending on
15117  *   the indirect action type.
15118  * @param[out] error
15119  *   Perform verbose error reporting if not NULL. Initialized in case of
15120  *   error only.
15121  *
15122  * @return
15123  *   0 on success, otherwise negative errno value.
15124  */
15125 static int
15126 flow_dv_action_update(struct rte_eth_dev *dev,
15127                         struct rte_flow_action_handle *handle,
15128                         const void *update,
15129                         struct rte_flow_error *err)
15130 {
15131         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15132         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15133         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15134         const void *action_conf;
15135
15136         switch (type) {
15137         case MLX5_INDIRECT_ACTION_TYPE_RSS:
15138                 action_conf = ((const struct rte_flow_action *)update)->conf;
15139                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
15140         case MLX5_INDIRECT_ACTION_TYPE_CT:
15141                 return __flow_dv_action_ct_update(dev, idx, update, err);
15142         default:
15143                 return rte_flow_error_set(err, ENOTSUP,
15144                                           RTE_FLOW_ERROR_TYPE_ACTION,
15145                                           NULL,
15146                                           "action type update not supported");
15147         }
15148 }
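
/*
 * Layout sketch of the indirect action handle decoded above (widths are
 * defined by MLX5_INDIRECT_ACTION_TYPE_OFFSET; shown only as an
 * illustration):
 *
 *     31 ........ TYPE_OFFSET | TYPE_OFFSET-1 ........ 0
 *     +------------------------+------------------------+
 *     |       action type      |      object index      |
 *     +------------------------+------------------------+
 *
 * For CT objects the index part additionally encodes the owner port,
 * recovered with MLX5_INDIRECT_ACT_CT_GET_OWNER() in the CT handlers.
 */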
15149
15150 /**
15151  * Destroy the meter sub policy table rules.
15152  * Lock free, (mutex should be acquired by caller).
15153  *
15154  * @param[in] dev
15155  *   Pointer to Ethernet device.
15156  * @param[in] sub_policy
15157  *   Pointer to meter sub policy table.
15158  */
15159 static void
15160 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
15161                              struct mlx5_flow_meter_sub_policy *sub_policy)
15162 {
15163         struct mlx5_priv *priv = dev->data->dev_private;
15164         struct mlx5_flow_tbl_data_entry *tbl;
15165         struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
15166         struct mlx5_flow_meter_info *next_fm;
15167         struct mlx5_sub_policy_color_rule *color_rule;
15168         void *tmp;
15169         uint32_t i;
15170
15171         for (i = 0; i < RTE_COLORS; i++) {
15172                 next_fm = NULL;
15173                 if (i == RTE_COLOR_GREEN && policy &&
15174                     policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
15175                         next_fm = mlx5_flow_meter_find(priv,
15176                                         policy->act_cnt[i].next_mtr_id, NULL);
15177                 RTE_TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
15178                                    next_port, tmp) {
15179                         claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
15180                         tbl = container_of(color_rule->matcher->tbl,
15181                                            typeof(*tbl), tbl);
15182                         mlx5_list_unregister(tbl->matchers,
15183                                              &color_rule->matcher->entry);
15184                         TAILQ_REMOVE(&sub_policy->color_rules[i],
15185                                      color_rule, next_port);
15186                         mlx5_free(color_rule);
15187                         if (next_fm)
15188                                 mlx5_flow_meter_detach(priv, next_fm);
15189                 }
15190         }
15191         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15192                 if (sub_policy->rix_hrxq[i]) {
15193                         if (policy && !policy->is_hierarchy)
15194                                 mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
15195                         sub_policy->rix_hrxq[i] = 0;
15196                 }
15197                 if (sub_policy->jump_tbl[i]) {
15198                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15199                                                      sub_policy->jump_tbl[i]);
15200                         sub_policy->jump_tbl[i] = NULL;
15201                 }
15202         }
15203         if (sub_policy->tbl_rsc) {
15204                 flow_dv_tbl_resource_release(MLX5_SH(dev),
15205                                              sub_policy->tbl_rsc);
15206                 sub_policy->tbl_rsc = NULL;
15207         }
15208 }
15209
15210 /**
15211  * Destroy policy rules, lock free,
15212  * (mutex should be acquired by caller).
15213  * Iterates over all domains and their sub-policies.
15214  *
15215  * @param[in] dev
15216  *   Pointer to the Ethernet device structure.
15217  * @param[in] mtr_policy
15218  *   Meter policy struct.
15219  */
15220 static void
15221 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
15222                              struct mlx5_flow_meter_policy *mtr_policy)
15223 {
15224         uint32_t i, j;
15225         struct mlx5_flow_meter_sub_policy *sub_policy;
15226         uint16_t sub_policy_num;
15227
15228         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15229                 sub_policy_num = (mtr_policy->sub_policy_num >>
15230                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15231                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15232                 for (j = 0; j < sub_policy_num; j++) {
15233                         sub_policy = mtr_policy->sub_policys[i][j];
15234                         if (sub_policy)
15235                                 __flow_dv_destroy_sub_policy_rules(dev,
15236                                                                    sub_policy);
15237                 }
15238         }
15239 }
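
/*
 * Layout sketch of mtr_policy->sub_policy_num as decoded above: the
 * per-domain sub-policy counts are packed into a single word, each
 * domain occupying MLX5_MTR_SUB_POLICY_NUM_SHIFT bits:
 *
 *     count(domain) = (sub_policy_num >>
 *                      (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
 *                     MLX5_MTR_SUB_POLICY_NUM_MASK;
 */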
15240
15241 /**
15242  * Destroy policy action, lock free,
15243  * (mutex should be acquired by caller).
15244  * Dispatcher for action type specific call.
15245  *
15246  * @param[in] dev
15247  *   Pointer to the Ethernet device structure.
15248  * @param[in] mtr_policy
15249  *   Meter policy struct.
15250  */
15251 static void
15252 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
15253                       struct mlx5_flow_meter_policy *mtr_policy)
15254 {
15255         struct rte_flow_action *rss_action;
15256         struct mlx5_flow_handle dev_handle;
15257         uint32_t i, j;
15258
15259         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15260                 if (mtr_policy->act_cnt[i].rix_mark) {
15261                         flow_dv_tag_release(dev,
15262                                 mtr_policy->act_cnt[i].rix_mark);
15263                         mtr_policy->act_cnt[i].rix_mark = 0;
15264                 }
15265                 if (mtr_policy->act_cnt[i].modify_hdr) {
15266                         dev_handle.dvh.modify_hdr =
15267                                 mtr_policy->act_cnt[i].modify_hdr;
15268                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
15269                 }
15270                 switch (mtr_policy->act_cnt[i].fate_action) {
15271                 case MLX5_FLOW_FATE_SHARED_RSS:
15272                         rss_action = mtr_policy->act_cnt[i].rss;
15273                         mlx5_free(rss_action);
15274                         break;
15275                 case MLX5_FLOW_FATE_PORT_ID:
15276                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
15277                                 flow_dv_port_id_action_resource_release(dev,
15278                                 mtr_policy->act_cnt[i].rix_port_id_action);
15279                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
15280                         }
15281                         break;
15282                 case MLX5_FLOW_FATE_DROP:
15283                 case MLX5_FLOW_FATE_JUMP:
15284                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15285                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
15286                                                 NULL;
15287                         break;
15288                 default:
15289                         /* Queue action: nothing to release. */
15290                         break;
15291                 }
15292         }
15293         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15294                 mtr_policy->dr_drop_action[j] = NULL;
15295 }
15296
15297 /**
15298  * Create policy action per domain, lock free,
15299  * (mutex should be acquired by caller).
15300  * Dispatcher for action type specific call.
15301  *
15302  * @param[in] dev
15303  *   Pointer to the Ethernet device structure.
15304  * @param[in] mtr_policy
15305  *   Meter policy struct.
15306  * @param[in] actions
15307  *   Per-color action specifications used to create the meter actions.
15308  * @param[out] error
15309  *   Perform verbose error reporting if not NULL. Initialized in case of
15310  *   error only.
15311  *
15312  * @return
15313  *   0 on success, otherwise negative errno value.
15314  */
15315 static int
15316 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15317                         struct mlx5_flow_meter_policy *mtr_policy,
15318                         const struct rte_flow_action *actions[RTE_COLORS],
15319                         enum mlx5_meter_domain domain,
15320                         struct rte_mtr_error *error)
15321 {
15322         struct mlx5_priv *priv = dev->data->dev_private;
15323         struct rte_flow_error flow_err;
15324         const struct rte_flow_action *act;
15325         uint64_t action_flags;
15326         struct mlx5_flow_handle dh;
15327         struct mlx5_flow dev_flow;
15328         struct mlx5_flow_dv_port_id_action_resource port_id_action;
15329         int i, ret;
15330         uint8_t egress, transfer;
15331         struct mlx5_meter_policy_action_container *act_cnt = NULL;
15332         union {
15333                 struct mlx5_flow_dv_modify_hdr_resource res;
15334                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15335                             sizeof(struct mlx5_modification_cmd) *
15336                             (MLX5_MAX_MODIFY_NUM + 1)];
15337         } mhdr_dummy;
15338         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15339
15340         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15341         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15342         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15343         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15344         memset(&port_id_action, 0,
15345                sizeof(struct mlx5_flow_dv_port_id_action_resource));
15346         memset(mhdr_res, 0, sizeof(*mhdr_res));
15347         mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15348                                        (egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15349                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX);
15350         dev_flow.handle = &dh;
15351         dev_flow.dv.port_id_action = &port_id_action;
15352         dev_flow.external = true;
15353         for (i = 0; i < RTE_COLORS; i++) {
15354                 if (i < MLX5_MTR_RTE_COLORS)
15355                         act_cnt = &mtr_policy->act_cnt[i];
15356                 /* Skip creating policy actions for colors flagged to be skipped. */
15357                 if ((i == RTE_COLOR_YELLOW && mtr_policy->skip_y) ||
15358                     (i == RTE_COLOR_GREEN && mtr_policy->skip_g))
15359                         continue;
15360                 action_flags = 0;
15361                 for (act = actions[i];
15362                      act && act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
15363                         switch (act->type) {
15364                         case RTE_FLOW_ACTION_TYPE_MARK:
15365                         {
15366                                 uint32_t tag_be = mlx5_flow_mark_set
15367                                         (((const struct rte_flow_action_mark *)
15368                                         (act->conf))->id);
15369
15370                                 if (i >= MLX5_MTR_RTE_COLORS)
15371                                         return -rte_mtr_error_set(error,
15372                                           ENOTSUP,
15373                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15374                                           NULL,
15375                                           "cannot create policy "
15376                                           "mark action for this color");
15377                                 dev_flow.handle->mark = 1;
15378                                 if (flow_dv_tag_resource_register(dev, tag_be,
15379                                                   &dev_flow, &flow_err))
15380                                         return -rte_mtr_error_set(error,
15381                                         ENOTSUP,
15382                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15383                                         NULL,
15384                                         "cannot setup policy mark action");
15385                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
15386                                 act_cnt->rix_mark =
15387                                         dev_flow.handle->dvh.rix_tag;
15388                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15389                                 break;
15390                         }
15391                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15392                                 if (i >= MLX5_MTR_RTE_COLORS)
15393                                         return -rte_mtr_error_set(error,
15394                                           ENOTSUP,
15395                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15396                                           NULL,
15397                                           "cannot create policy "
15398                                           "set tag action for this color");
15399                                 if (flow_dv_convert_action_set_tag
15400                                 (dev, mhdr_res,
15401                                 (const struct rte_flow_action_set_tag *)
15402                                 act->conf,  &flow_err))
15403                                         return -rte_mtr_error_set(error,
15404                                         ENOTSUP,
15405                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15406                                         NULL, "cannot convert policy "
15407                                         "set tag action");
15408                                 if (!mhdr_res->actions_num)
15409                                         return -rte_mtr_error_set(error,
15410                                         ENOTSUP,
15411                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15412                                         NULL, "cannot find policy "
15413                                         "set tag action");
15414                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15415                                 break;
15416                         case RTE_FLOW_ACTION_TYPE_DROP:
15417                         {
15418                                 struct mlx5_flow_mtr_mng *mtrmng =
15419                                                 priv->sh->mtrmng;
15420                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15421
15422                                 /*
15423                                  * Create the drop table with
15424                                  * METER DROP level.
15425                                  */
15426                                 if (!mtrmng->drop_tbl[domain]) {
15427                                         mtrmng->drop_tbl[domain] =
15428                                         flow_dv_tbl_resource_get(dev,
15429                                         MLX5_FLOW_TABLE_LEVEL_METER,
15430                                         egress, transfer, false, NULL, 0,
15431                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15432                                         if (!mtrmng->drop_tbl[domain])
15433                                                 return -rte_mtr_error_set
15434                                         (error, ENOTSUP,
15435                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15436                                         NULL,
15437                                         "Failed to create meter drop table");
15438                                 }
15439                                 tbl_data = container_of
15440                                 (mtrmng->drop_tbl[domain],
15441                                 struct mlx5_flow_tbl_data_entry, tbl);
15442                                 if (i < MLX5_MTR_RTE_COLORS) {
15443                                         act_cnt->dr_jump_action[domain] =
15444                                                 tbl_data->jump.action;
15445                                         act_cnt->fate_action =
15446                                                 MLX5_FLOW_FATE_DROP;
15447                                 }
15448                                 if (i == RTE_COLOR_RED)
15449                                         mtr_policy->dr_drop_action[domain] =
15450                                                 tbl_data->jump.action;
15451                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15452                                 break;
15453                         }
15454                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15455                         {
15456                                 if (i >= MLX5_MTR_RTE_COLORS)
15457                                         return -rte_mtr_error_set(error,
15458                                         ENOTSUP,
15459                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15460                                         NULL, "cannot create policy "
15461                                         "fate queue for this color");
15462                                 act_cnt->queue =
15463                                 ((const struct rte_flow_action_queue *)
15464                                         (act->conf))->index;
15465                                 act_cnt->fate_action =
15466                                         MLX5_FLOW_FATE_QUEUE;
15467                                 dev_flow.handle->fate_action =
15468                                         MLX5_FLOW_FATE_QUEUE;
15469                                 mtr_policy->is_queue = 1;
15470                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15471                                 break;
15472                         }
15473                         case RTE_FLOW_ACTION_TYPE_RSS:
15474                         {
15475                                 int rss_size;
15476
15477                                 if (i >= MLX5_MTR_RTE_COLORS)
15478                                         return -rte_mtr_error_set(error,
15479                                           ENOTSUP,
15480                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15481                                           NULL,
15482                                           "cannot create policy "
15483                                           "rss action for this color");
15484                                 /*
15485                                  * Save RSS conf into policy struct
15486                                  * for translate stage.
15487                                  */
15488                                 rss_size = (int)rte_flow_conv
15489                                         (RTE_FLOW_CONV_OP_ACTION,
15490                                         NULL, 0, act, &flow_err);
15491                                 if (rss_size <= 0)
15492                                         return -rte_mtr_error_set(error,
15493                                           ENOTSUP,
15494                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15495                                           NULL, "wrong RSS action "
15496                                           "struct size");
15497                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
15498                                                 rss_size, 0, SOCKET_ID_ANY);
15499                                 if (!act_cnt->rss)
15500                                         return -rte_mtr_error_set(error,
15501                                           ENOTSUP,
15502                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15503                                           NULL,
15504                                           "failed to allocate RSS action memory");
15505                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
15506                                         act_cnt->rss, rss_size,
15507                                         act, &flow_err);
15508                                 if (ret < 0)
15509                                         return -rte_mtr_error_set(error,
15510                                           ENOTSUP,
15511                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15512                                           NULL, "failed to save "
15513                                           "RSS action into policy struct");
15514                                 act_cnt->fate_action =
15515                                         MLX5_FLOW_FATE_SHARED_RSS;
15516                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15517                                 break;
15518                         }
15519                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15520                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
15521                         {
15522                                 struct mlx5_flow_dv_port_id_action_resource
15523                                         port_id_resource;
15524                                 uint32_t port_id = 0;
15525
15526                                 if (i >= MLX5_MTR_RTE_COLORS)
15527                                         return -rte_mtr_error_set(error,
15528                                         ENOTSUP,
15529                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15530                                         NULL, "cannot create policy "
15531                                         "port action for this color");
15532                                 memset(&port_id_resource, 0,
15533                                         sizeof(port_id_resource));
15534                                 if (flow_dv_translate_action_port_id(dev, act,
15535                                                 &port_id, &flow_err))
15536                                         return -rte_mtr_error_set(error,
15537                                         ENOTSUP,
15538                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15539                                         NULL, "cannot translate "
15540                                         "policy port action");
15541                                 port_id_resource.port_id = port_id;
15542                                 if (flow_dv_port_id_action_resource_register
15543                                         (dev, &port_id_resource,
15544                                         &dev_flow, &flow_err))
15545                                         return -rte_mtr_error_set(error,
15546                                         ENOTSUP,
15547                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15548                                         NULL, "cannot setup "
15549                                         "policy port action");
15550                                 act_cnt->rix_port_id_action =
15551                                         dev_flow.handle->rix_port_id_action;
15552                                 act_cnt->fate_action =
15553                                         MLX5_FLOW_FATE_PORT_ID;
15554                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15555                                 break;
15556                         }
15557                         case RTE_FLOW_ACTION_TYPE_JUMP:
15558                         {
15559                                 uint32_t jump_group = 0;
15560                                 uint32_t table = 0;
15561                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15562                                 struct flow_grp_info grp_info = {
15563                                         .external = !!dev_flow.external,
15564                                         .transfer = !!transfer,
15565                                         .fdb_def_rule = !!priv->fdb_def_rule,
15566                                         .std_tbl_fix = 0,
15567                                         .skip_scale = dev_flow.skip_scale &
15568                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
15569                                 };
15570                                 struct mlx5_flow_meter_sub_policy *sub_policy =
15571                                         mtr_policy->sub_policys[domain][0];
15572
15573                                 if (i >= MLX5_MTR_RTE_COLORS)
15574                                         return -rte_mtr_error_set(error,
15575                                           ENOTSUP,
15576                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15577                                           NULL,
15578                                           "cannot create policy "
15579                                           "jump action for this color");
15580                                 jump_group =
15581                                 ((const struct rte_flow_action_jump *)
15582                                                         act->conf)->group;
15583                                 if (mlx5_flow_group_to_table(dev, NULL,
15584                                                        jump_group,
15585                                                        &table,
15586                                                        &grp_info, &flow_err))
15587                                         return -rte_mtr_error_set(error,
15588                                         ENOTSUP,
15589                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15590                                         NULL, "cannot setup "
15591                                         "policy jump action");
15592                                 sub_policy->jump_tbl[i] =
15593                                 flow_dv_tbl_resource_get(dev,
15594                                         table, egress,
15595                                         transfer,
15596                                         !!dev_flow.external,
15597                                         NULL, jump_group, 0,
15598                                         0, &flow_err);
15599                                 if (!sub_policy->jump_tbl[i])
15600                                         return -rte_mtr_error_set(error,
15602                                         ENOTSUP,
15603                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15604                                         NULL, "cannot create jump action.");
15605                                 tbl_data = container_of
15606                                 (sub_policy->jump_tbl[i],
15607                                 struct mlx5_flow_tbl_data_entry, tbl);
15608                                 act_cnt->dr_jump_action[domain] =
15609                                         tbl_data->jump.action;
15610                                 act_cnt->fate_action =
15611                                         MLX5_FLOW_FATE_JUMP;
15612                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15613                                 break;
15614                         }
15615                         /*
15616                          * No need to check meter hierarchy for Y or R colors
15617                          * here since it is done in the validation stage.
15618                          */
15619                         case RTE_FLOW_ACTION_TYPE_METER:
15620                         {
15621                                 const struct rte_flow_action_meter *mtr;
15622                                 struct mlx5_flow_meter_info *next_fm;
15623                                 struct mlx5_flow_meter_policy *next_policy;
15624                                 struct rte_flow_action tag_action;
15625                                 struct mlx5_rte_flow_action_set_tag set_tag;
15626                                 uint32_t next_mtr_idx = 0;
15627
15628                                 mtr = act->conf;
15629                                 next_fm = mlx5_flow_meter_find(priv,
15630                                                         mtr->mtr_id,
15631                                                         &next_mtr_idx);
15632                                 if (!next_fm)
15633                                         return -rte_mtr_error_set(error, EINVAL,
15634                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15635                                                 "Failed to find next meter.");
15636                                 if (next_fm->def_policy)
15637                                         return -rte_mtr_error_set(error, EINVAL,
15638                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15639                                 "Hierarchy only supports termination meter.");
15640                                 next_policy = mlx5_flow_meter_policy_find(dev,
15641                                                 next_fm->policy_id, NULL);
15642                                 MLX5_ASSERT(next_policy);
15643                                 if (next_fm->drop_cnt) {
15644                                         set_tag.id =
15645                                                 (enum modify_reg)
15646                                                 mlx5_flow_get_reg_id(dev,
15647                                                 MLX5_MTR_ID,
15648                                                 0,
15649                                                 (struct rte_flow_error *)error);
15650                                         set_tag.offset = (priv->mtr_reg_share ?
15651                                                 MLX5_MTR_COLOR_BITS : 0);
15652                                         set_tag.length = (priv->mtr_reg_share ?
15653                                                MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
15654                                                MLX5_REG_BITS);
15655                                         set_tag.data = next_mtr_idx;
15656                                         tag_action.type =
15657                                                 (enum rte_flow_action_type)
15658                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
15659                                         tag_action.conf = &set_tag;
15660                                         if (flow_dv_convert_action_set_reg
15661                                                 (mhdr_res, &tag_action,
15662                                                 (struct rte_flow_error *)error))
15663                                                 return -rte_errno;
15664                                         action_flags |=
15665                                                 MLX5_FLOW_ACTION_SET_TAG;
15666                                 }
15667                                 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
15668                                 act_cnt->next_mtr_id = next_fm->meter_id;
15669                                 act_cnt->next_sub_policy = NULL;
15670                                 mtr_policy->is_hierarchy = 1;
15671                                 mtr_policy->dev = next_policy->dev;
15672                                 action_flags |=
15673                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
15674                                 break;
15675                         }
15676                         default:
15677                                 return -rte_mtr_error_set(error, ENOTSUP,
15678                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15679                                           NULL, "action type not supported");
15680                         }
15681                         if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
15682                                 /* Create the modify header action if needed. */
15683                                 dev_flow.dv.group = 1;
15684                                 if (flow_dv_modify_hdr_resource_register
15685                                         (dev, mhdr_res, &dev_flow, &flow_err))
15686                                         return -rte_mtr_error_set(error,
15687                                                 ENOTSUP,
15688                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15689                                                 NULL, "cannot register policy "
15690                                                 "set tag action");
15691                                 act_cnt->modify_hdr =
15692                                         dev_flow.handle->dvh.modify_hdr;
15693                         }
15694                 }
15695         }
15696         return 0;
15697 }
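
/*
 * Application-side sketch (illustrative only) of the per-color action
 * lists translated by the function above. They reach the PMD through
 * rte_mtr_meter_policy_add(); 'port_id', 'policy_id' and 'mtr_err' are
 * assumed to be defined by the caller:
 *
 *     struct rte_flow_action green_acts[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *               .conf = &(struct rte_flow_action_queue){ .index = 0 } },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_action red_acts[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_mtr_meter_policy_params params = {
 *             .actions = {
 *                     [RTE_COLOR_GREEN] = green_acts,
 *                     [RTE_COLOR_YELLOW] = NULL,
 *                     [RTE_COLOR_RED] = red_acts,
 *             },
 *     };
 *
 *     ret = rte_mtr_meter_policy_add(port_id, policy_id, &params, &mtr_err);
 */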
15698
15699 /**
15700  * Create the policy actions for all configured domains, lock free,
15701  * (mutex should be acquired by caller).
15702  * Dispatches to the per-domain creation routine.
15703  *
15704  * @param[in] dev
15705  *   Pointer to the Ethernet device structure.
15706  * @param[in] mtr_policy
15707  *   Meter policy struct.
15708  * @param[in] actions
15709  *   Per-color action specifications used to create the meter actions.
15710  * @param[out] error
15711  *   Perform verbose error reporting if not NULL. Initialized in case of
15712  *   error only.
15713  *
15714  * @return
15715  *   0 on success, otherwise negative errno value.
15716  */
15717 static int
15718 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15719                       struct mlx5_flow_meter_policy *mtr_policy,
15720                       const struct rte_flow_action *actions[RTE_COLORS],
15721                       struct rte_mtr_error *error)
15722 {
15723         int ret, i;
15724         uint16_t sub_policy_num;
15725
15726         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15727                 sub_policy_num = (mtr_policy->sub_policy_num >>
15728                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15729                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15730                 if (sub_policy_num) {
15731                         ret = __flow_dv_create_domain_policy_acts(dev,
15732                                 mtr_policy, actions,
15733                                 (enum mlx5_meter_domain)i, error);
15734                         /* Resource cleanup is done at the caller level. */
15735                         if (ret)
15736                                 return ret;
15737                 }
15738         }
15739         return 0;
15740 }
15741
15742 /**
15743  * Query a DV flow rule for its statistics via DevX.
15744  *
15745  * @param[in] dev
15746  *   Pointer to Ethernet device.
15747  * @param[in] cnt_idx
15748  *   Index to the flow counter.
15749  * @param[out] data
15750  *   Data retrieved by the query.
15751  * @param[out] error
15752  *   Perform verbose error reporting if not NULL.
15753  *
15754  * @return
15755  *   0 on success, a negative errno value otherwise and rte_errno is set.
15756  */
15757 int
15758 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15759                     struct rte_flow_error *error)
15760 {
15761         struct mlx5_priv *priv = dev->data->dev_private;
15762         struct rte_flow_query_count *qc = data;
15763
15764         if (!priv->sh->devx)
15765                 return rte_flow_error_set(error, ENOTSUP,
15766                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15767                                           NULL,
15768                                           "counters are not supported");
15769         if (cnt_idx) {
15770                 uint64_t pkts, bytes;
15771                 struct mlx5_flow_counter *cnt;
15772                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15773
15774                 if (err)
15775                         return rte_flow_error_set(error, -err,
15776                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15777                                         NULL, "cannot read counters");
15778                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15779                 qc->hits_set = 1;
15780                 qc->bytes_set = 1;
15781                 qc->hits = pkts - cnt->hits;
15782                 qc->bytes = bytes - cnt->bytes;
15783                 if (qc->reset) {
15784                         cnt->hits = pkts;
15785                         cnt->bytes = bytes;
15786                 }
15787                 return 0;
15788         }
15789         return rte_flow_error_set(error, EINVAL,
15790                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15791                                   NULL,
15792                                   "counters are not available");
15793 }
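
/*
 * Caller-side sketch (illustrative only): reading a flow rule counter
 * through the generic rte_flow API lands in flow_dv_query_count() for
 * this driver; 'port_id', 'flow' and 'flow_err' come from the caller:
 *
 *     struct rte_flow_query_count qc = { .reset = 1 };
 *     struct rte_flow_action count_act = {
 *             .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *     };
 *
 *     if (!rte_flow_query(port_id, flow, &count_act, &qc, &flow_err) &&
 *         qc.hits_set)
 *             printf("hits: %" PRIu64 "\n", qc.hits);
 */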
15794
15796 /**
15797  * Query counter's action pointer for a DV flow rule via DevX.
15798  *
15799  * @param[in] dev
15800  *   Pointer to Ethernet device.
15801  * @param[in] cnt_idx
15802  *   Index to the flow counter.
15803  * @param[out] action_ptr
15804  *   Action pointer for counter.
15805  * @param[out] error
15806  *   Perform verbose error reporting if not NULL.
15807  *
15808  * @return
15809  *   0 on success, a negative errno value otherwise and rte_errno is set.
15810  */
15811 int
15812 flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,
15813         void **action_ptr, struct rte_flow_error *error)
15814 {
15815         struct mlx5_priv *priv = dev->data->dev_private;
15816
15817         if (!priv->sh->devx || !action_ptr)
15818                 return rte_flow_error_set(error, ENOTSUP,
15819                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15820                                           NULL,
15821                                           "counters are not supported");
15822
15823         if (cnt_idx) {
15824                 struct mlx5_flow_counter *cnt =
15825                         flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15826                 if (cnt) {
15827                         *action_ptr = cnt->action;
15828                         return 0;
15829                 }
15830         }
15831         return rte_flow_error_set(error, EINVAL,
15832                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15833                                   NULL,
15834                                   "counters are not available");
15835 }
15836
15837 static int
15838 flow_dv_action_query(struct rte_eth_dev *dev,
15839                      const struct rte_flow_action_handle *handle, void *data,
15840                      struct rte_flow_error *error)
15841 {
15842         struct mlx5_age_param *age_param;
15843         struct rte_flow_query_age *resp;
15844         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15845         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15846         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15847         struct mlx5_priv *priv = dev->data->dev_private;
15848         struct mlx5_aso_ct_action *ct;
15849         uint16_t owner;
15850         uint32_t dev_idx;
15851
15852         switch (type) {
15853         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15854                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
15855                 resp = data;
15856                 resp->aged = __atomic_load_n(&age_param->state,
15857                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
15858                                                                           1 : 0;
15859                 resp->sec_since_last_hit_valid = !resp->aged;
15860                 if (resp->sec_since_last_hit_valid)
15861                         resp->sec_since_last_hit = __atomic_load_n
15862                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15863                 return 0;
15864         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15865                 return flow_dv_query_count(dev, idx, data, error);
15866         case MLX5_INDIRECT_ACTION_TYPE_CT:
15867                 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15868                 if (owner != PORT_ID(priv))
15869                         return rte_flow_error_set(error, EACCES,
15870                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15871                                         NULL,
15872                                         "CT object owned by another port");
15873                 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15874                 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15875                 MLX5_ASSERT(ct);
15876                 if (!ct->refcnt)
15877                         return rte_flow_error_set(error, EFAULT,
15878                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15879                                         NULL,
15880                                         "CT object is inactive");
15881                 ((struct rte_flow_action_conntrack *)data)->peer_port =
15882                                                         ct->peer;
15883                 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
15884                                                         ct->is_original;
15885                 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
15886                         return rte_flow_error_set(error, EIO,
15887                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15888                                         NULL,
15889                                         "Failed to query CT context");
15890                 return 0;
15891         default:
15892                 return rte_flow_error_set(error, ENOTSUP,
15893                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15894                                           "action type query not supported");
15895         }
15896 }
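
/*
 * Caller-side sketch (illustrative only): querying an indirect AGE
 * action handle is dispatched to the AGE branch above; 'port_id',
 * 'handle' and 'flow_err' are assumed to exist in the caller:
 *
 *     struct rte_flow_query_age age = { 0 };
 *
 *     if (!rte_flow_action_handle_query(port_id, handle, &age, &flow_err) &&
 *         age.sec_since_last_hit_valid)
 *             printf("idle for %u seconds\n", age.sec_since_last_hit);
 */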
15897
15898 /**
15899  * Query a flow rule AGE action for aging information.
15900  *
15901  * @param[in] dev
15902  *   Pointer to Ethernet device.
15903  * @param[in] flow
15904  *   Pointer to the sub flow.
15905  * @param[out] data
15906  *   Data retrieved by the query.
15907  * @param[out] error
15908  *   Perform verbose error reporting if not NULL.
15909  *
15910  * @return
15911  *   0 on success, a negative errno value otherwise and rte_errno is set.
15912  */
15913 static int
15914 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15915                   void *data, struct rte_flow_error *error)
15916 {
15917         struct rte_flow_query_age *resp = data;
15918         struct mlx5_age_param *age_param;
15919
15920         if (flow->age) {
15921                 struct mlx5_aso_age_action *act =
15922                                      flow_aso_age_get_by_idx(dev, flow->age);
15923
15924                 age_param = &act->age_params;
15925         } else if (flow->counter) {
15926                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15927
15928                 if (!age_param || !age_param->timeout)
15929                         return rte_flow_error_set
15930                                         (error, EINVAL,
15931                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15932                                          NULL, "cannot read age data");
15933         } else {
15934                 return rte_flow_error_set(error, EINVAL,
15935                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15936                                           NULL, "age data not available");
15937         }
15938         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
15939                                      AGE_TMOUT ? 1 : 0;
15940         resp->sec_since_last_hit_valid = !resp->aged;
15941         if (resp->sec_since_last_hit_valid)
15942                 resp->sec_since_last_hit = __atomic_load_n
15943                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15944         return 0;
15945 }
15946
15947 /**
15948  * Query a flow.
15949  *
15950  * @see rte_flow_query()
15951  * @see rte_flow_ops
15952  */
15953 static int
15954 flow_dv_query(struct rte_eth_dev *dev,
15955               struct rte_flow *flow,
15956               const struct rte_flow_action *actions,
15957               void *data,
15958               struct rte_flow_error *error)
15959 {
15960         int ret = -EINVAL;
15961
15962         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
15963                 switch (actions->type) {
15964                 case RTE_FLOW_ACTION_TYPE_VOID:
15965                         break;
15966                 case RTE_FLOW_ACTION_TYPE_COUNT:
15967                         ret = flow_dv_query_count(dev, flow->counter, data,
15968                                                   error);
15969                         break;
15970                 case RTE_FLOW_ACTION_TYPE_AGE:
15971                         ret = flow_dv_query_age(dev, flow, data, error);
15972                         break;
15973                 default:
15974                         return rte_flow_error_set(error, ENOTSUP,
15975                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15976                                                   actions,
15977                                                   "action not supported");
15978                 }
15979         }
15980         return ret;
15981 }
15982
15983 /**
15984  * Destroy the meter table set.
15985  * Lock free, (mutex should be acquired by caller).
15986  *
15987  * @param[in] dev
15988  *   Pointer to Ethernet device.
15989  * @param[in] fm
15990  *   Meter information table.
15991  */
15992 static void
15993 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
15994                         struct mlx5_flow_meter_info *fm)
15995 {
15996         struct mlx5_priv *priv = dev->data->dev_private;
15997         int i;
15998
15999         if (!fm || !priv->config.dv_flow_en)
16000                 return;
16001         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16002                 if (fm->drop_rule[i]) {
16003                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
16004                         fm->drop_rule[i] = NULL;
16005                 }
16006         }
16007 }
16008
16009 static void
16010 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
16011 {
16012         struct mlx5_priv *priv = dev->data->dev_private;
16013         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16014         struct mlx5_flow_tbl_data_entry *tbl;
16015         int i, j;
16016
16017         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16018                 if (mtrmng->def_rule[i]) {
16019                         claim_zero(mlx5_flow_os_destroy_flow
16020                                         (mtrmng->def_rule[i]));
16021                         mtrmng->def_rule[i] = NULL;
16022                 }
16023                 if (mtrmng->def_matcher[i]) {
16024                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
16025                                 struct mlx5_flow_tbl_data_entry, tbl);
16026                         mlx5_list_unregister(tbl->matchers,
16027                                              &mtrmng->def_matcher[i]->entry);
16028                         mtrmng->def_matcher[i] = NULL;
16029                 }
16030                 for (j = 0; j < MLX5_REG_BITS; j++) {
16031                         if (mtrmng->drop_matcher[i][j]) {
16032                                 tbl =
16033                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
16034                                              struct mlx5_flow_tbl_data_entry,
16035                                              tbl);
16036                                 mlx5_list_unregister(tbl->matchers,
16037                                             &mtrmng->drop_matcher[i][j]->entry);
16038                                 mtrmng->drop_matcher[i][j] = NULL;
16039                         }
16040                 }
16041                 if (mtrmng->drop_tbl[i]) {
16042                         flow_dv_tbl_resource_release(MLX5_SH(dev),
16043                                 mtrmng->drop_tbl[i]);
16044                         mtrmng->drop_tbl[i] = NULL;
16045                 }
16046         }
16047 }
16048
16049 /* Number of meter flow actions: count plus jump, or count plus drop. */
16050 #define METER_ACTIONS 2
16051
16052 static void
16053 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
16054                                     enum mlx5_meter_domain domain)
16055 {
16056         struct mlx5_priv *priv = dev->data->dev_private;
16057         struct mlx5_flow_meter_def_policy *def_policy =
16058                         priv->sh->mtrmng->def_policy[domain];
16059
16060         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
16061         mlx5_free(def_policy);
16062         priv->sh->mtrmng->def_policy[domain] = NULL;
16063 }
16064
16065 /**
16066  * Destroy the default policy table set.
16067  *
16068  * @param[in] dev
16069  *   Pointer to Ethernet device.
16070  */
16071 static void
16072 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
16073 {
16074         struct mlx5_priv *priv = dev->data->dev_private;
16075         int i;
16076
16077         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
16078                 if (priv->sh->mtrmng->def_policy[i])
16079                         __flow_dv_destroy_domain_def_policy(dev,
16080                                         (enum mlx5_meter_domain)i);
16081         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
16082 }
16083
16084 static int
16085 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
16086                         uint32_t color_reg_c_idx,
16087                         enum rte_color color, void *matcher_object,
16088                         int actions_n, void *actions,
16089                         bool match_src_port, const struct rte_flow_item *item,
16090                         void **rule, const struct rte_flow_attr *attr)
16091 {
16092         int ret;
16093         struct mlx5_flow_dv_match_params value = {
16094                 .size = sizeof(value.buf),
16095         };
16096         struct mlx5_flow_dv_match_params matcher = {
16097                 .size = sizeof(matcher.buf),
16098         };
16099         struct mlx5_priv *priv = dev->data->dev_private;
16100         uint8_t misc_mask;
16101
16102         if (match_src_port && (priv->representor || priv->master)) {
16103                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
16104                                                    value.buf, item, attr)) {
16105                         DRV_LOG(ERR, "Failed to set up source port match"
16106                                 " for meter policy%d flow.", color);
16107                         return -1;
16108                 }
16109         }
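        /*
         * Match the meter color register exactly: the color value written
         * by the meter (rte_col_2_mlx5_col(color)) is requested with a
         * full 32-bit mask on the REG_C register given by color_reg_c_idx.
         */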
16110         flow_dv_match_meta_reg(matcher.buf, value.buf,
16111                                (enum modify_reg)color_reg_c_idx,
16112                                rte_col_2_mlx5_col(color), UINT32_MAX);
16113         misc_mask = flow_dv_matcher_enable(value.buf);
16114         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16115         ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value,
16116                                        actions_n, actions, rule);
16117         if (ret) {
16118                 DRV_LOG(ERR, "Failed to create meter policy%d flow.", color);
16119                 return -1;
16120         }
16121         return 0;
16122 }
16123
16124 static int
16125 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
16126                         uint32_t color_reg_c_idx,
16127                         uint16_t priority,
16128                         struct mlx5_flow_meter_sub_policy *sub_policy,
16129                         const struct rte_flow_attr *attr,
16130                         bool match_src_port,
16131                         const struct rte_flow_item *item,
16132                         struct mlx5_flow_dv_matcher **policy_matcher,
16133                         struct rte_flow_error *error)
16134 {
16135         struct mlx5_list_entry *entry;
16136         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
16137         struct mlx5_flow_dv_matcher matcher = {
16138                 .mask = {
16139                         .size = sizeof(matcher.mask.buf),
16140                 },
16141                 .tbl = tbl_rsc,
16142         };
16143         struct mlx5_flow_dv_match_params value = {
16144                 .size = sizeof(value.buf),
16145         };
16146         struct mlx5_flow_cb_ctx ctx = {
16147                 .error = error,
16148                 .data = &matcher,
16149         };
16150         struct mlx5_flow_tbl_data_entry *tbl_data;
16151         struct mlx5_priv *priv = dev->data->dev_private;
16152         const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
16153
16154         if (match_src_port && (priv->representor || priv->master)) {
16155                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
16156                                                    value.buf, item, attr)) {
16157                         DRV_LOG(ERR, "Failed to set up source port match"
16158                                 " for meter policy%d matcher.", priority);
16159                         return -1;
16160                 }
16161         }
16162         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
16163         if (priority < RTE_COLOR_RED)
16164                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16165                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
16166         matcher.priority = priority;
16167         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
16168                                     matcher.mask.size);
16169         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16170         if (!entry) {
16171                 DRV_LOG(ERR, "Failed to register meter drop matcher.");
16172                 return -1;
16173         }
16174         *policy_matcher =
16175                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
16176         return 0;
16177 }
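
/*
 * Note: matchers are deduplicated per table through the tbl_data->matchers
 * list. mlx5_list_register() is expected to return the existing entry
 * (with its reference taken) when an equal matcher was registered before;
 * the CRC computed over the mask above serves as a quick comparison key.
 * This is a behavioral sketch only, see the matcher list callbacks for
 * the authoritative matching rules.
 */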
16178
16179 /**
16180  * Create the policy rules per domain.
16181  *
16182  * @param[in] dev
16183  *   Pointer to Ethernet device.
16184  * @param[in] sub_policy
16185  *   Pointer to sub policy table.
16186  * @param[in] egress
16187  *   Direction of the table.
16188  * @param[in] transfer
16189  *   E-Switch or NIC flow.
16190  * @param[in] acts
16191  *   Pointer to policy action list per color.
16192  *
16193  * @return
16194  *   0 on success, -1 otherwise.
16195  */
16196 static int
16197 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
16198                 struct mlx5_flow_meter_sub_policy *sub_policy,
16199                 uint8_t egress, uint8_t transfer, bool match_src_port,
16200                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
16201 {
16202         struct mlx5_priv *priv = dev->data->dev_private;
16203         struct rte_flow_error flow_err;
16204         uint32_t color_reg_c_idx;
16205         struct rte_flow_attr attr = {
16206                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16207                 .priority = 0,
16208                 .ingress = 0,
16209                 .egress = !!egress,
16210                 .transfer = !!transfer,
16211                 .reserved = 0,
16212         };
16213         int i;
16214         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
16215         struct mlx5_sub_policy_color_rule *color_rule;
16216         bool svport_match;
16217         struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL};
16218
16219         if (ret < 0)
16220                 return -1;
16221         /* Create policy table with POLICY level. */
16222         if (!sub_policy->tbl_rsc)
16223                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
16224                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
16225                                 egress, transfer, false, NULL, 0, 0,
16226                                 sub_policy->idx, &flow_err);
16227         if (!sub_policy->tbl_rsc) {
16228                 DRV_LOG(ERR,
16229                         "Failed to create meter sub policy table.");
16230                 return -1;
16231         }
16232         /* Prepare matchers. */
16233         color_reg_c_idx = ret;
16234         for (i = 0; i < RTE_COLORS; i++) {
16235                 TAILQ_INIT(&sub_policy->color_rules[i]);
16236                 if (!acts[i].actions_n)
16237                         continue;
16238                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16239                                 sizeof(struct mlx5_sub_policy_color_rule),
16240                                 0, SOCKET_ID_ANY);
16241                 if (!color_rule) {
16242                         DRV_LOG(ERR, "No memory to create color rule.");
16243                         goto err_exit;
16244                 }
16245                 tmp_rules[i] = color_rule;
16246                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16247                                   color_rule, next_port);
16248                 color_rule->src_port = priv->representor_id;
16249                 /* attr.priority is not used by the policy matcher. */
16250                 attr.priority = i;
16251                 /* Create matchers for colors. */
16252                 svport_match = (i != RTE_COLOR_RED) ? match_src_port : false;
16253                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16254                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
16255                                 &attr, svport_match, NULL,
16256                                 &color_rule->matcher, &flow_err)) {
16257                         DRV_LOG(ERR, "Failed to create color%u matcher.", i);
16258                         goto err_exit;
16259                 }
16260                 /* Create flow, matching color. */
16261                 if (__flow_dv_create_policy_flow(dev,
16262                                 color_reg_c_idx, (enum rte_color)i,
16263                                 color_rule->matcher->matcher_object,
16264                                 acts[i].actions_n, acts[i].dv_actions,
16265                                 svport_match, NULL, &color_rule->rule,
16266                                 &attr)) {
16267                         DRV_LOG(ERR, "Failed to create color%u rule.", i);
16268                         goto err_exit;
16269                 }
16270         }
16271         return 0;
16272 err_exit:
16273         /* All the policy rules will be cleared. */
16274         do {
16275                 color_rule = tmp_rules[i];
16276                 if (color_rule) {
16277                         if (color_rule->rule)
16278                                 mlx5_flow_os_destroy_flow(color_rule->rule);
16279                         if (color_rule->matcher) {
16280                                 struct mlx5_flow_tbl_data_entry *tbl =
16281                                         container_of(color_rule->matcher->tbl,
16282                                                      typeof(*tbl), tbl);
16283                                 mlx5_list_unregister(tbl->matchers,
16284                                                 &color_rule->matcher->entry);
16285                         }
16286                         TAILQ_REMOVE(&sub_policy->color_rules[i],
16287                                      color_rule, next_port);
16288                         mlx5_free(color_rule);
16289                 }
16290         } while (i--);
16291         return -1;
16292 }
16293
16294 static int
16295 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
16296                         struct mlx5_flow_meter_policy *mtr_policy,
16297                         struct mlx5_flow_meter_sub_policy *sub_policy,
16298                         uint32_t domain)
16299 {
16300         struct mlx5_priv *priv = dev->data->dev_private;
16301         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16302         struct mlx5_flow_dv_tag_resource *tag;
16303         struct mlx5_flow_dv_port_id_action_resource *port_action;
16304         struct mlx5_hrxq *hrxq;
16305         struct mlx5_flow_meter_info *next_fm = NULL;
16306         struct mlx5_flow_meter_policy *next_policy;
16307         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16308         struct mlx5_flow_tbl_data_entry *tbl_data;
16309         struct rte_flow_error error;
16310         uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16311         uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
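              /*
               * On egress, or on transfer from a representor port, the next
               * hierarchy meter action is placed first in the action list.
               */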
16312         bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
16313         bool match_src_port = false;
16314         int i;
16315
16316         /* If RSS or Queue, no previous actions / rules are created. */
16317         for (i = 0; i < RTE_COLORS; i++) {
16318                 acts[i].actions_n = 0;
16319                 if (i == RTE_COLOR_RED) {
16320                         /* Only support drop on red. */
16321                         acts[i].dv_actions[0] =
16322                                 mtr_policy->dr_drop_action[domain];
16323                         acts[i].actions_n = 1;
16324                         continue;
16325                 }
16326                 if (i == RTE_COLOR_GREEN &&
16327                     mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
16328                         struct rte_flow_attr attr = {
16329                                 .transfer = transfer
16330                         };
16331
16332                         next_fm = mlx5_flow_meter_find(priv,
16333                                         mtr_policy->act_cnt[i].next_mtr_id,
16334                                         NULL);
16335                         if (!next_fm) {
16336                                 DRV_LOG(ERR,
16337                                         "Failed to get next hierarchy meter.");
16338                                 goto err_exit;
16339                         }
16340                         if (mlx5_flow_meter_attach(priv, next_fm,
16341                                                    &attr, &error)) {
16342                                 DRV_LOG(ERR, "%s", error.message);
16343                                 next_fm = NULL;
16344                                 goto err_exit;
16345                         }
16346                         /* Meter action must be the first for TX. */
16347                         if (mtr_first) {
16348                                 acts[i].dv_actions[acts[i].actions_n] =
16349                                         next_fm->meter_action;
16350                                 acts[i].actions_n++;
16351                         }
16352                 }
16353                 if (mtr_policy->act_cnt[i].rix_mark) {
16354                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
16355                                         mtr_policy->act_cnt[i].rix_mark);
16356                         if (!tag) {
16357                                 DRV_LOG(ERR, "Failed to find "
16358                                         "mark action for policy.");
16359                                 goto err_exit;
16360                         }
16361                         acts[i].dv_actions[acts[i].actions_n] = tag->action;
16362                         acts[i].actions_n++;
16363                 }
16364                 if (mtr_policy->act_cnt[i].modify_hdr) {
16365                         acts[i].dv_actions[acts[i].actions_n] =
16366                                 mtr_policy->act_cnt[i].modify_hdr->action;
16367                         acts[i].actions_n++;
16368                 }
16369                 if (mtr_policy->act_cnt[i].fate_action) {
16370                         switch (mtr_policy->act_cnt[i].fate_action) {
16371                         case MLX5_FLOW_FATE_PORT_ID:
16372                                 port_action = mlx5_ipool_get
16373                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
16374                                          mtr_policy->act_cnt[i].rix_port_id_action);
16375                                 if (!port_action) {
16376                                         DRV_LOG(ERR, "Failed to find "
16377                                                 "port action for policy.");
16378                                         goto err_exit;
16379                                 }
16380                                 acts[i].dv_actions[acts[i].actions_n] =
16381                                         port_action->action;
16382                                 acts[i].actions_n++;
16383                                 mtr_policy->dev = dev;
16384                                 match_src_port = true;
16385                                 break;
16386                         case MLX5_FLOW_FATE_DROP:
16387                         case MLX5_FLOW_FATE_JUMP:
16388                                 acts[i].dv_actions[acts[i].actions_n] =
16389                                         mtr_policy->act_cnt[i].dr_jump_action[domain];
16390                                 acts[i].actions_n++;
16391                                 break;
16392                         case MLX5_FLOW_FATE_SHARED_RSS:
16393                         case MLX5_FLOW_FATE_QUEUE:
16394                                 hrxq = mlx5_ipool_get
16395                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
16396                                          sub_policy->rix_hrxq[i]);
16397                                 if (!hrxq) {
16398                                         DRV_LOG(ERR, "Failed to find "
16399                                                 "queue action for policy.");
16400                                         goto err_exit;
16401                                 }
16402                                 acts[i].dv_actions[acts[i].actions_n] =
16403                                         hrxq->action;
16404                                 acts[i].actions_n++;
16405                                 break;
16406                         case MLX5_FLOW_FATE_MTR:
16407                                 if (!next_fm) {
16408                                         DRV_LOG(ERR,
16409                                                 "No next hierarchy meter.");
16410                                         goto err_exit;
16411                                 }
16412                                 if (!mtr_first) {
16413                                         acts[i].dv_actions[acts[i].actions_n] =
16414                                                         next_fm->meter_action;
16415                                         acts[i].actions_n++;
16416                                 }
16417                                 if (mtr_policy->act_cnt[i].next_sub_policy) {
16418                                         next_sub_policy =
16419                                                 mtr_policy->act_cnt[i].next_sub_policy;
16420                                 } else {
16421                                         next_policy =
16422                                                 mlx5_flow_meter_policy_find(dev,
16423                                                         next_fm->policy_id, NULL);
16424                                         MLX5_ASSERT(next_policy);
16425                                         next_sub_policy =
16426                                                 next_policy->sub_policys[domain][0];
16427                                 }
16428                                 tbl_data =
16429                                         container_of(next_sub_policy->tbl_rsc,
16430                                                 struct mlx5_flow_tbl_data_entry, tbl);
16431                                 acts[i].dv_actions[acts[i].actions_n++] =
16432                                         tbl_data->jump.action;
16433                                 if (mtr_policy->act_cnt[i].modify_hdr)
16434                                         match_src_port = !!transfer;
16435                                 break;
16436                         default:
16437                                 /* Queue action: do nothing. */
16438                                 break;
16439                         }
16440                 }
16441         }
16442         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
16443                                 egress, transfer, match_src_port, acts)) {
16444                 DRV_LOG(ERR,
16445                         "Failed to create policy rules per domain.");
16446                 goto err_exit;
16447         }
16448         return 0;
16449 err_exit:
16450         if (next_fm)
16451                 mlx5_flow_meter_detach(priv, next_fm);
16452         return -1;
16453 }
16454
16455 /**
16456  * Create the policy rules.
16457  *
16458  * @param[in] dev
16459  *   Pointer to Ethernet device.
16460  * @param[in,out] mtr_policy
16461  *   Pointer to meter policy table.
16462  *
16463  * @return
16464  *   0 on success, -1 otherwise.
16465  */
16466 static int
16467 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
16468                              struct mlx5_flow_meter_policy *mtr_policy)
16469 {
16470         int i;
16471         uint16_t sub_policy_num;
16472
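              /*
               * sub_policy_num packs one counter per domain; each field is
               * MLX5_MTR_SUB_POLICY_NUM_SHIFT bits wide and extracted with
               * MLX5_MTR_SUB_POLICY_NUM_MASK.
               */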
16473         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16474                 sub_policy_num = (mtr_policy->sub_policy_num >>
16475                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16476                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16477                 if (!sub_policy_num)
16478                         continue;
16479                 /* Prepare actions list and create policy rules. */
16480                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16481                         mtr_policy->sub_policys[i][0], i)) {
16482                         DRV_LOG(ERR, "Failed to create policy action "
16483                                 "list per domain.");
16484                         return -1;
16485                 }
16486         }
16487         return 0;
16488 }
16489
16490 static int
16491 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
16492 {
16493         struct mlx5_priv *priv = dev->data->dev_private;
16494         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16495         struct mlx5_flow_meter_def_policy *def_policy;
16496         struct mlx5_flow_tbl_resource *jump_tbl;
16497         struct mlx5_flow_tbl_data_entry *tbl_data;
16498         uint8_t egress, transfer;
16499         struct rte_flow_error error;
16500         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16501         int ret;
16502
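              /*
               * The default policy jumps GREEN and YELLOW packets to the
               * meter suffix table and RED packets to the drop table.
               */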
16503         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16504         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16505         def_policy = mtrmng->def_policy[domain];
16506         if (!def_policy) {
16507                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
16508                         sizeof(struct mlx5_flow_meter_def_policy),
16509                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
16510                 if (!def_policy) {
16511                         DRV_LOG(ERR, "Failed to alloc default policy table.");
16512                         goto def_policy_error;
16513                 }
16514                 mtrmng->def_policy[domain] = def_policy;
16515                 /* Create the meter suffix table with SUFFIX level. */
16516                 jump_tbl = flow_dv_tbl_resource_get(dev,
16517                                 MLX5_FLOW_TABLE_LEVEL_METER,
16518                                 egress, transfer, false, NULL, 0,
16519                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16520                 if (!jump_tbl) {
16521                         DRV_LOG(ERR,
16522                                 "Failed to create meter suffix table.");
16523                         goto def_policy_error;
16524                 }
16525                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
16526                 tbl_data = container_of(jump_tbl,
16527                                         struct mlx5_flow_tbl_data_entry, tbl);
16528                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
16529                                                 tbl_data->jump.action;
16530                 acts[RTE_COLOR_GREEN].dv_actions[0] = tbl_data->jump.action;
16531                 acts[RTE_COLOR_GREEN].actions_n = 1;
16532                 /*
16533                  * YELLOW has the same default policy as GREEN.
16534                  * G & Y share the same table and action. Getting the table
16535                  * resource a second time just updates the reference count
16536                  * for the releasing stage.
16537                  */
16538                 jump_tbl = flow_dv_tbl_resource_get(dev,
16539                                 MLX5_FLOW_TABLE_LEVEL_METER,
16540                                 egress, transfer, false, NULL, 0,
16541                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16542                 if (!jump_tbl) {
16543                         DRV_LOG(ERR,
16544                                 "Failed to get meter suffix table.");
16545                         goto def_policy_error;
16546                 }
16547                 def_policy->sub_policy.jump_tbl[RTE_COLOR_YELLOW] = jump_tbl;
16548                 tbl_data = container_of(jump_tbl,
16549                                         struct mlx5_flow_tbl_data_entry, tbl);
16550                 def_policy->dr_jump_action[RTE_COLOR_YELLOW] =
16551                                                 tbl_data->jump.action;
16552                 acts[RTE_COLOR_YELLOW].dv_actions[0] = tbl_data->jump.action;
16553                 acts[RTE_COLOR_YELLOW].actions_n = 1;
16554                 /* Create jump action to the drop table. */
16555                 if (!mtrmng->drop_tbl[domain]) {
16556                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
16557                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
16558                                  egress, transfer, false, NULL, 0,
16559                                  0, MLX5_MTR_TABLE_ID_DROP, &error);
16560                         if (!mtrmng->drop_tbl[domain]) {
16561                                 DRV_LOG(ERR, "Failed to create meter "
16562                                         "drop table for default policy.");
16563                                 goto def_policy_error;
16564                         }
16565                 }
16566                 /* All RED: use the unique drop table for the jump action. */
16567                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16568                                         struct mlx5_flow_tbl_data_entry, tbl);
16569                 def_policy->dr_jump_action[RTE_COLOR_RED] =
16570                                                 tbl_data->jump.action;
16571                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
16572                 acts[RTE_COLOR_RED].actions_n = 1;
16573                 /* Create default policy rules. */
16574                 ret = __flow_dv_create_domain_policy_rules(dev,
16575                                         &def_policy->sub_policy,
16576                                         egress, transfer, false, acts);
16577                 if (ret) {
16578                         DRV_LOG(ERR, "Failed to create default policy rules.");
16579                         goto def_policy_error;
16580                 }
16581         }
16582         return 0;
16583 def_policy_error:
16584         __flow_dv_destroy_domain_def_policy(dev,
16585                                             (enum mlx5_meter_domain)domain);
16586         return -1;
16587 }
16588
16589 /**
16590  * Create the default policy table set.
16591  *
16592  * @param[in] dev
16593  *   Pointer to Ethernet device.
16594  * @return
16595  *   0 on success, -1 otherwise.
16596  */
16597 static int
16598 flow_dv_create_def_policy(struct rte_eth_dev *dev)
16599 {
16600         struct mlx5_priv *priv = dev->data->dev_private;
16601         int i;
16602
16603         /* Non-termination policy table. */
16604         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16605                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
16606                         continue;
16607                 if (__flow_dv_create_domain_def_policy(dev, i)) {
16608                         DRV_LOG(ERR, "Failed to create default policy");
16609                         /* Rollback the created default policies for others. */
16610                         flow_dv_destroy_def_policy(dev);
16611                         return -1;
16612                 }
16613         }
16614         return 0;
16615 }
16616
16617 /**
16618  * Create the needed meter tables.
16619  * Lock free, (mutex should be acquired by caller).
16620  *
16621  * @param[in] dev
16622  *   Pointer to Ethernet device.
16623  * @param[in] fm
16624  *   Meter information table.
16625  * @param[in] mtr_idx
16626  *   Meter index.
16627  * @param[in] domain_bitmap
16628  *   Domain bitmap.
16629  * @return
16630  *   0 on success, -1 otherwise.
16631  */
16632 static int
16633 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
16634                         struct mlx5_flow_meter_info *fm,
16635                         uint32_t mtr_idx,
16636                         uint8_t domain_bitmap)
16637 {
16638         struct mlx5_priv *priv = dev->data->dev_private;
16639         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16640         struct rte_flow_error error;
16641         struct mlx5_flow_tbl_data_entry *tbl_data;
16642         uint8_t egress, transfer;
16643         void *actions[METER_ACTIONS];
16644         int domain, ret, i;
16645         struct mlx5_flow_counter *cnt;
16646         struct mlx5_flow_dv_match_params value = {
16647                 .size = sizeof(value.buf),
16648         };
16649         struct mlx5_flow_dv_match_params matcher_para = {
16650                 .size = sizeof(matcher_para.buf),
16651         };
16652         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
16653                                                      0, &error);
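              /*
               * When the color register is shared, the meter ID occupies the
               * bits above the MLX5_MTR_COLOR_BITS color field.
               */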
16654         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
16655         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
16656         struct mlx5_list_entry *entry;
16657         struct mlx5_flow_dv_matcher matcher = {
16658                 .mask = {
16659                         .size = sizeof(matcher.mask.buf),
16660                 },
16661         };
16662         struct mlx5_flow_dv_matcher *drop_matcher;
16663         struct mlx5_flow_cb_ctx ctx = {
16664                 .error = &error,
16665                 .data = &matcher,
16666         };
16667         uint8_t misc_mask;
16668
16669         if (!priv->mtr_en || mtr_id_reg_c < 0) {
16670                 rte_errno = ENOTSUP;
16671                 return -1;
16672         }
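              /*
               * Per domain: create the drop table with a catch-all default
               * drop rule, then, if a drop counter is configured, add a
               * per-meter drop rule matching the meter ID register.
               */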
16673         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
16674                 if (!(domain_bitmap & (1 << domain)) ||
16675                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
16676                         continue;
16677                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16678                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16679                 /* Create the drop table with METER DROP level. */
16680                 if (!mtrmng->drop_tbl[domain]) {
16681                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
16682                                         MLX5_FLOW_TABLE_LEVEL_METER,
16683                                         egress, transfer, false, NULL, 0,
16684                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
16685                         if (!mtrmng->drop_tbl[domain]) {
16686                                 DRV_LOG(ERR, "Failed to create meter drop table.");
16687                                 goto policy_error;
16688                         }
16689                 }
16690                 /* Create default matcher in drop table. */
16691                 matcher.tbl = mtrmng->drop_tbl[domain];
16692                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16693                                 struct mlx5_flow_tbl_data_entry, tbl);
16694                 if (!mtrmng->def_matcher[domain]) {
16695                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16696                                        (enum modify_reg)mtr_id_reg_c,
16697                                        0, 0);
16698                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
16699                         matcher.crc = rte_raw_cksum
16700                                         ((const void *)matcher.mask.buf,
16701                                         matcher.mask.size);
16702                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16703                         if (!entry) {
16704                                 DRV_LOG(ERR, "Failed to register meter "
16705                                         "drop default matcher.");
16706                                 goto policy_error;
16707                         }
16708                         mtrmng->def_matcher[domain] = container_of(entry,
16709                                 struct mlx5_flow_dv_matcher, entry);
16710                 }
16711                 /* Create default rule in drop table. */
16712                 if (!mtrmng->def_rule[domain]) {
16713                         i = 0;
16714                         actions[i++] = priv->sh->dr_drop_action;
16715                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16716                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
16717                         misc_mask = flow_dv_matcher_enable(value.buf);
16718                         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16719                         ret = mlx5_flow_os_create_flow
16720                                 (mtrmng->def_matcher[domain]->matcher_object,
16721                                 (void *)&value, i, actions,
16722                                 &mtrmng->def_rule[domain]);
16723                         if (ret) {
16724                                 DRV_LOG(ERR, "Failed to create meter "
16725                                         "default drop rule for drop table.");
16726                                 goto policy_error;
16727                         }
16728                 }
16729                 if (!fm->drop_cnt)
16730                         continue;
16731                 MLX5_ASSERT(mtrmng->max_mtr_bits);
16732                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
16733                         /* Create matchers for Drop. */
16734                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16735                                         (enum modify_reg)mtr_id_reg_c, 0,
16736                                         (mtr_id_mask << mtr_id_offset));
16737                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
16738                         matcher.crc = rte_raw_cksum
16739                                         ((const void *)matcher.mask.buf,
16740                                         matcher.mask.size);
16741                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16742                         if (!entry) {
16743                                 DRV_LOG(ERR,
16744                                         "Failed to register meter drop matcher.");
16745                                 goto policy_error;
16746                         }
16747                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
16748                                 container_of(entry, struct mlx5_flow_dv_matcher,
16749                                              entry);
16750                 }
16751                 drop_matcher =
16752                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
16753                 /* Create drop rule, matching meter_id only. */
16754                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16755                                 (enum modify_reg)mtr_id_reg_c,
16756                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
16757                 i = 0;
16758                 cnt = flow_dv_counter_get_by_idx(dev,
16759                                         fm->drop_cnt, NULL);
16760                 actions[i++] = cnt->action;
16761                 actions[i++] = priv->sh->dr_drop_action;
16762                 misc_mask = flow_dv_matcher_enable(value.buf);
16763                 __flow_dv_adjust_buf_size(&value.size, misc_mask);
16764                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
16765                                                (void *)&value, i, actions,
16766                                                &fm->drop_rule[domain]);
16767                 if (ret) {
16768                         DRV_LOG(ERR, "Failed to create meter "
16769                                 "drop rule for drop table.");
16770                         goto policy_error;
16771                 }
16772         }
16773         return 0;
16774 policy_error:
16775         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16776                 if (fm->drop_rule[i]) {
16777                         claim_zero(mlx5_flow_os_destroy_flow
16778                                 (fm->drop_rule[i]));
16779                         fm->drop_rule[i] = NULL;
16780                 }
16781         }
16782         return -1;
16783 }
16784
16785 static struct mlx5_flow_meter_sub_policy *
16786 __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
16787                 struct mlx5_flow_meter_policy *mtr_policy,
16788                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
16789                 struct mlx5_flow_meter_sub_policy *next_sub_policy,
16790                 bool *is_reuse)
16791 {
16792         struct mlx5_priv *priv = dev->data->dev_private;
16793         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16794         uint32_t sub_policy_idx = 0;
16795         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
16796         uint32_t i, j;
16797         struct mlx5_hrxq *hrxq;
16798         struct mlx5_flow_handle dh;
16799         struct mlx5_meter_policy_action_container *act_cnt;
16800         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16801         uint16_t sub_policy_num;
16802
16803         rte_spinlock_lock(&mtr_policy->sl);
16804         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16805                 if (!rss_desc[i])
16806                         continue;
16807                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
16808                 if (!hrxq_idx[i]) {
16809                         rte_spinlock_unlock(&mtr_policy->sl);
16810                         return NULL;
16811                 }
16812         }
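              /*
               * Look for an existing sub-policy whose queue object matches
               * per color and reuse it instead of creating a new one.
               */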
16813         sub_policy_num = (mtr_policy->sub_policy_num >>
16814                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16815                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16816         for (j = 0; j < sub_policy_num; j++) {
16817                 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16818                         if (rss_desc[i] &&
16819                             hrxq_idx[i] !=
16820                             mtr_policy->sub_policys[domain][j]->rix_hrxq[i])
16821                                 break;
16822                 }
16823                 if (i >= MLX5_MTR_RTE_COLORS) {
16824                         /*
16825                          * Found the sub policy table with
16826                          * the same queue per color.
16827                          */
16828                         rte_spinlock_unlock(&mtr_policy->sl);
16829                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16830                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16831                         *is_reuse = true;
16832                         return mtr_policy->sub_policys[domain][j];
16833                 }
16834         }
16835         /* Create sub policy. */
16836         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
16837                 /* Reuse the first pre-allocated sub_policy. */
16838                 sub_policy = mtr_policy->sub_policys[domain][0];
16839                 sub_policy_idx = sub_policy->idx;
16840         } else {
16841                 sub_policy = mlx5_ipool_zmalloc
16842                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16843                                  &sub_policy_idx);
16844                 if (!sub_policy ||
16845                     sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
16846                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16847                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16848                         goto rss_sub_policy_error;
16849                 }
16850                 sub_policy->idx = sub_policy_idx;
16851                 sub_policy->main_policy = mtr_policy;
16852         }
16853         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16854                 if (!rss_desc[i])
16855                         continue;
16856                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
16857                 if (mtr_policy->is_hierarchy) {
16858                         act_cnt = &mtr_policy->act_cnt[i];
16859                         act_cnt->next_sub_policy = next_sub_policy;
16860                         mlx5_hrxq_release(dev, hrxq_idx[i]);
16861                 } else {
16862                         /*
16863                          * Overwrite the last action from
16864                          * RSS action to Queue action.
16865                          */
16866                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
16867                                               hrxq_idx[i]);
16868                         if (!hrxq) {
16869                                 DRV_LOG(ERR, "Failed to get policy hrxq");
16870                                 goto rss_sub_policy_error;
16871                         }
16872                         act_cnt = &mtr_policy->act_cnt[i];
16873                         if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16874                                 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16875                                 if (act_cnt->rix_mark)
16876                                         dh.mark = 1;
16877                                 dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16878                                 dh.rix_hrxq = hrxq_idx[i];
16879                                 flow_drv_rxq_flags_set(dev, &dh);
16880                         }
16881                 }
16882         }
16883         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16884                                                sub_policy, domain)) {
16885                 DRV_LOG(ERR, "Failed to create policy "
16886                         "rules for ingress domain.");
16887                 goto rss_sub_policy_error;
16888         }
16889         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16890                 i = (mtr_policy->sub_policy_num >>
16891                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16892                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16893                 if (i >= MLX5_MTR_RSS_MAX_SUB_POLICY) {
16894                         DRV_LOG(ERR, "No free sub-policy slot.");
16895                         goto rss_sub_policy_error;
16896                 }
16897                 mtr_policy->sub_policys[domain][i] = sub_policy;
16898                 i++;
16899                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16900                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16901                 mtr_policy->sub_policy_num |=
16902                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16903                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16904         }
16905         rte_spinlock_unlock(&mtr_policy->sl);
16906         *is_reuse = false;
16907         return sub_policy;
16908 rss_sub_policy_error:
16909         if (sub_policy) {
16910                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16911                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16912                         i = (mtr_policy->sub_policy_num >>
16913                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16914                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16915                         mtr_policy->sub_policys[domain][i] = NULL;
16916                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16917                                         sub_policy->idx);
16918                 }
16919         }
16920         rte_spinlock_unlock(&mtr_policy->sl);
16921         return NULL;
16922 }
16923
16924 /**
16925  * Find the policy table for prefix table with RSS.
16926  *
16927  * @param[in] dev
16928  *   Pointer to Ethernet device.
16929  * @param[in] mtr_policy
16930  *   Pointer to meter policy table.
16931  * @param[in] rss_desc
16932  *   Pointer to rss_desc array, one entry per color.
16933  * @return
16934  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
16935  */
16936 static struct mlx5_flow_meter_sub_policy *
16937 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
16938                 struct mlx5_flow_meter_policy *mtr_policy,
16939                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
16940 {
16941         struct mlx5_priv *priv = dev->data->dev_private;
16942         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16943         struct mlx5_flow_meter_info *next_fm;
16944         struct mlx5_flow_meter_policy *next_policy;
16945         struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
16946         struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
16947         struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
16948         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16949         bool reuse_sub_policy;
16950         uint32_t i = 0;
16951         uint32_t j = 0;
16952
16953         while (true) {
16954                 /* Iterate hierarchy to get all policies in this hierarchy. */
16955                 policies[i++] = mtr_policy;
16956                 if (!mtr_policy->is_hierarchy)
16957                         break;
16958                 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
16959                         DRV_LOG(ERR, "Exceeded max meter number in hierarchy.");
16960                         return NULL;
16961                 }
16962                 next_fm = mlx5_flow_meter_find(priv,
16963                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16964                 if (!next_fm) {
16965                         DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
16966                         return NULL;
16967                 }
16968                 next_policy =
16969                         mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
16970                                                     NULL);
16971                 MLX5_ASSERT(next_policy);
16972                 mtr_policy = next_policy;
16973         }
16974         while (i) {
16975                 /*
16976                  * From last policy to the first one in hierarchy,
16977                  * create / get the sub policy for each of them.
16978                  */
16979                 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
16980                                                         policies[--i],
16981                                                         rss_desc,
16982                                                         next_sub_policy,
16983                                                         &reuse_sub_policy);
16984                 if (!sub_policy) {
16985                         DRV_LOG(ERR, "Failed to get the sub policy.");
16986                         goto err_exit;
16987                 }
16988                 if (!reuse_sub_policy)
16989                         sub_policies[j++] = sub_policy;
16990                 next_sub_policy = sub_policy;
16991         }
16992         return sub_policy;
16993 err_exit:
16994         while (j) {
16995                 uint16_t sub_policy_num;
16996
16997                 sub_policy = sub_policies[--j];
16998                 mtr_policy = sub_policy->main_policy;
16999                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
17000                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
17001                         sub_policy_num = (mtr_policy->sub_policy_num >>
17002                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17003                                 MLX5_MTR_SUB_POLICY_NUM_MASK;
17004                         mtr_policy->sub_policys[domain][sub_policy_num - 1] =
17005                                                                         NULL;
17006                         sub_policy_num--;
17007                         mtr_policy->sub_policy_num &=
17008                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17009                                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17010                         mtr_policy->sub_policy_num |=
17011                                 (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17012                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17013                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17014                                         sub_policy->idx);
17015                 }
17016         }
17017         return NULL;
17018 }
17019
17020 /**
17021  * Create the sub policy tag rule for all meters in hierarchy.
17022  *
17023  * @param[in] dev
17024  *   Pointer to Ethernet device.
17025  * @param[in] fm
17026  *   Meter information table.
17027  * @param[in] src_port
17028  *   The src port this extra rule should use.
17029  * @param[in] item
17030  *   The src port match item.
17031  * @param[out] error
17032  *   Perform verbose error reporting if not NULL.
17033  * @return
17034  *   0 on success, a negative errno value otherwise and rte_errno is set.
17035  */
17036 static int
17037 flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
17038                                 struct mlx5_flow_meter_info *fm,
17039                                 int32_t src_port,
17040                                 const struct rte_flow_item *item,
17041                                 struct rte_flow_error *error)
17042 {
17043         struct mlx5_priv *priv = dev->data->dev_private;
17044         struct mlx5_flow_meter_policy *mtr_policy;
17045         struct mlx5_flow_meter_sub_policy *sub_policy;
17046         struct mlx5_flow_meter_info *next_fm = NULL;
17047         struct mlx5_flow_meter_policy *next_policy;
17048         struct mlx5_flow_meter_sub_policy *next_sub_policy;
17049         struct mlx5_flow_tbl_data_entry *tbl_data;
17050         struct mlx5_sub_policy_color_rule *color_rule;
17051         struct mlx5_meter_policy_acts acts;
17052         uint32_t color_reg_c_idx;
17053         bool mtr_first = (src_port != UINT16_MAX);
17054         struct rte_flow_attr attr = {
17055                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
17056                 .priority = 0,
17057                 .ingress = 0,
17058                 .egress = 0,
17059                 .transfer = 1,
17060                 .reserved = 0,
17061         };
17062         uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
17063         int i;
17064
17065         mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17066         MLX5_ASSERT(mtr_policy);
17067         if (!mtr_policy->is_hierarchy)
17068                 return 0;
17069         next_fm = mlx5_flow_meter_find(priv,
17070                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
17071         if (!next_fm) {
17072                 return rte_flow_error_set(error, EINVAL,
17073                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
17074                                 "Failed to find next meter in hierarchy.");
17075         }
17076         if (!next_fm->drop_cnt)
17077                 goto exit;
17078         color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
17079         sub_policy = mtr_policy->sub_policys[domain][0];
17080         for (i = 0; i < RTE_COLORS; i++) {
17081                 bool rule_exist = false;
17082                 struct mlx5_meter_policy_action_container *act_cnt;
17083
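                      /* Only the GREEN color rule is created for the hierarchy. */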
17084                 if (i >= RTE_COLOR_YELLOW)
17085                         break;
17086                 TAILQ_FOREACH(color_rule,
17087                               &sub_policy->color_rules[i], next_port)
17088                         if (color_rule->src_port == src_port) {
17089                                 rule_exist = true;
17090                                 break;
17091                         }
17092                 if (rule_exist)
17093                         continue;
17094                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
17095                                 sizeof(struct mlx5_sub_policy_color_rule),
17096                                 0, SOCKET_ID_ANY);
17097                 if (!color_rule)
17098                         return rte_flow_error_set(error, ENOMEM,
17099                                 RTE_FLOW_ERROR_TYPE_ACTION,
17100                                 NULL, "No memory to create tag color rule.");
17101                 color_rule->src_port = src_port;
17102                 attr.priority = i;
17103                 next_policy = mlx5_flow_meter_policy_find(dev,
17104                                                 next_fm->policy_id, NULL);
17105                 MLX5_ASSERT(next_policy);
17106                 next_sub_policy = next_policy->sub_policys[domain][0];
17107                 tbl_data = container_of(next_sub_policy->tbl_rsc,
17108                                         struct mlx5_flow_tbl_data_entry, tbl);
17109                 act_cnt = &mtr_policy->act_cnt[i];
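                      /*
                       * With a valid source port the meter action precedes the
                       * tag modify action; otherwise the tag modify goes first.
                       */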
17110                 if (mtr_first) {
17111                         acts.dv_actions[0] = next_fm->meter_action;
17112                         acts.dv_actions[1] = act_cnt->modify_hdr->action;
17113                 } else {
17114                         acts.dv_actions[0] = act_cnt->modify_hdr->action;
17115                         acts.dv_actions[1] = next_fm->meter_action;
17116                 }
17117                 acts.dv_actions[2] = tbl_data->jump.action;
17118                 acts.actions_n = 3;
17119                 if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
17120                         next_fm = NULL;
17121                         goto err_exit;
17122                 }
17123                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
17124                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
17125                                 &attr, true, item,
17126                                 &color_rule->matcher, error)) {
17127                         rte_flow_error_set(error, errno,
17128                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17129                                 "Failed to create hierarchy meter matcher.");
17130                         goto err_exit;
17131                 }
17132                 if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
17133                                         (enum rte_color)i,
17134                                         color_rule->matcher->matcher_object,
17135                                         acts.actions_n, acts.dv_actions,
17136                                         true, item,
17137                                         &color_rule->rule, &attr)) {
17138                         rte_flow_error_set(error, errno,
17139                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17140                                 "Failed to create hierarchy meter rule.");
17141                         goto err_exit;
17142                 }
17143                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
17144                                   color_rule, next_port);
17145         }
17146 exit:
17147         /*
17148          * Recursive call to iterate all meters in hierarchy and
17149          * create needed rules.
17150          */
17151         return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
17152                                                 src_port, item, error);
17153 err_exit:
17154         if (color_rule) {
17155                 if (color_rule->rule)
17156                         mlx5_flow_os_destroy_flow(color_rule->rule);
17157                 if (color_rule->matcher) {
17158                         struct mlx5_flow_tbl_data_entry *tbl =
17159                                 container_of(color_rule->matcher->tbl,
17160                                                 typeof(*tbl), tbl);
17161                         mlx5_list_unregister(tbl->matchers,
17162                                                 &color_rule->matcher->entry);
17163                 }
17164                 mlx5_free(color_rule);
17165         }
17166         if (next_fm)
17167                 mlx5_flow_meter_detach(priv, next_fm);
17168         return -rte_errno;
17169 }
17170
17171 /**
17172  * Destroy the sub policy table with RX queue.
17173  *
17174  * @param[in] dev
17175  *   Pointer to Ethernet device.
17176  * @param[in] mtr_policy
17177  *   Pointer to meter policy table.
17178  */
17179 static void
17180 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
17181                                     struct mlx5_flow_meter_policy *mtr_policy)
17182 {
17183         struct mlx5_priv *priv = dev->data->dev_private;
17184         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17185         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17186         uint32_t i, j;
17187         uint16_t sub_policy_num, new_policy_num;
17188
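              /* Only the ingress domain can hold queue/RSS fate sub-policies. */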
17189         rte_spinlock_lock(&mtr_policy->sl);
17190         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17191                 switch (mtr_policy->act_cnt[i].fate_action) {
17192                 case MLX5_FLOW_FATE_SHARED_RSS:
17193                         sub_policy_num = (mtr_policy->sub_policy_num >>
17194                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17195                         MLX5_MTR_SUB_POLICY_NUM_MASK;
17196                         new_policy_num = sub_policy_num;
17197                         for (j = 0; j < sub_policy_num; j++) {
17198                                 sub_policy =
17199                                         mtr_policy->sub_policys[domain][j];
17200                                 if (sub_policy) {
17201                                         __flow_dv_destroy_sub_policy_rules(dev,
17202                                                 sub_policy);
17203                                         if (sub_policy !=
17204                                             mtr_policy->sub_policys[domain][0]) {
17205                                                 mtr_policy->sub_policys[domain][j] =
17206                                                         NULL;
17207                                                 mlx5_ipool_free
17208                                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17209                                                         sub_policy->idx);
17210                                                 new_policy_num--;
17211                                         }
17212                                 }
17213                         }
17214                         if (new_policy_num != sub_policy_num) {
17215                                 mtr_policy->sub_policy_num &=
17216                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17217                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17218                                 mtr_policy->sub_policy_num |=
17219                                 (new_policy_num &
17220                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17221                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17222                         }
17223                         break;
17224                 case MLX5_FLOW_FATE_QUEUE:
17225                         sub_policy = mtr_policy->sub_policys[domain][0];
17226                         __flow_dv_destroy_sub_policy_rules(dev,
17227                                                            sub_policy);
17228                         break;
17229                 default:
17230                         /* Other fate actions do not use a queue; nothing to do. */
17231                         break;
17232                 }
17233         }
17234         rte_spinlock_unlock(&mtr_policy->sl);
17235 }
17236 /**
17237  * Check whether the DR drop action is supported on the root table or not.
17238  *
17239  * Create a simple flow with DR drop action on root table to validate
17240  * if DR drop action on root table is supported or not.
17241  *
17242  * @param[in] dev
17243  *   Pointer to rte_eth_dev structure.
17244  *
17245  * @return
17246  *   0 on success, a negative errno value otherwise and rte_errno is set.
17247  */
17248 int
17249 mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)
17250 {
17251         struct mlx5_priv *priv = dev->data->dev_private;
17252         struct mlx5_dev_ctx_shared *sh = priv->sh;
17253         struct mlx5_flow_dv_match_params mask = {
17254                 .size = sizeof(mask.buf),
17255         };
17256         struct mlx5_flow_dv_match_params value = {
17257                 .size = sizeof(value.buf),
17258         };
17259         struct mlx5dv_flow_matcher_attr dv_attr = {
17260                 .type = IBV_FLOW_ATTR_NORMAL,
17261                 .priority = 0,
17262                 .match_criteria_enable = 0,
17263                 .match_mask = (void *)&mask,
17264         };
17265         struct mlx5_flow_tbl_resource *tbl = NULL;
17266         void *matcher = NULL;
17267         void *flow = NULL;
17268         int ret = -1;
17269
17270         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
17271                                         0, 0, 0, NULL);
17272         if (!tbl)
17273                 goto err;
17274         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17275         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17276         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17277                                                tbl->obj, &matcher);
17278         if (ret)
17279                 goto err;
17280         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17281         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17282                                        &sh->dr_drop_action, &flow);
17283 err:
17284         /*
17285          * If the DR drop action is not supported on the root table, flow
17286          * creation fails with EOPNOTSUPP or EPROTONOSUPPORT.
17287          */
17288         if (!flow) {
17289                 if (matcher &&
17290                     (errno == EPROTONOSUPPORT || errno == EOPNOTSUPP))
17291                         DRV_LOG(INFO, "DR drop action is not supported in root table.");
17292                 else
17293                         DRV_LOG(ERR, "Unexpected error in DR drop action support detection.");
17294                 ret = -1;
17295         } else {
17296                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17297         }
17298         if (matcher)
17299                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17300         if (tbl)
17301                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17302         return ret;
17303 }
17304
17305 /**
17306  * Validate batch counter support in the root table.
17307  *
17308  * Create a simple flow with an invalid counter offset on the root table to
17309  * check whether a batch counter with offset is supported there.
17310  *
17311  * @param[in] dev
17312  *   Pointer to rte_eth_dev structure.
17313  *
17314  * @return
17315  *   0 on success, a negative errno value otherwise and rte_errno is set.
17316  */
17317 int
17318 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
17319 {
17320         struct mlx5_priv *priv = dev->data->dev_private;
17321         struct mlx5_dev_ctx_shared *sh = priv->sh;
17322         struct mlx5_flow_dv_match_params mask = {
17323                 .size = sizeof(mask.buf),
17324         };
17325         struct mlx5_flow_dv_match_params value = {
17326                 .size = sizeof(value.buf),
17327         };
17328         struct mlx5dv_flow_matcher_attr dv_attr = {
17329                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
17330                 .priority = 0,
17331                 .match_criteria_enable = 0,
17332                 .match_mask = (void *)&mask,
17333         };
17334         void *actions[2] = { 0 };
17335         struct mlx5_flow_tbl_resource *tbl = NULL;
17336         struct mlx5_devx_obj *dcs = NULL;
17337         void *matcher = NULL;
17338         void *flow = NULL;
17339         int ret = -1;
17340
17341         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
17342                                         0, 0, 0, NULL);
17343         if (!tbl)
17344                 goto err;
17345         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
17346         if (!dcs)
17347                 goto err;
17348         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
17349                                                     &actions[0]);
17350         if (ret)
17351                 goto err;
17352         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17353         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17354         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17355                                                tbl->obj, &matcher);
17356         if (ret)
17357                 goto err;
17358         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17359         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17360                                        actions, &flow);
17361 err:
17362         /*
17363          * If the batch counter with offset is not supported, the driver does
17364          * not validate the invalid offset value and flow creation succeeds,
17365          * which means the batch counter is not supported in the root table.
17366          *
17367          * Conversely, if flow creation fails, the counter offset is supported.
17368          */
17369         if (flow) {
17370                 DRV_LOG(INFO, "Batch counter is not supported in root "
17371                               "table. Switch to fallback mode.");
17372                 rte_errno = ENOTSUP;
17373                 ret = -rte_errno;
17374                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17375         } else {
17376                 /* Ensure the failure came from flow creation (EINVAL). */
17377                 if (!matcher || errno != EINVAL)
17378                         DRV_LOG(ERR, "Unexpected error in counter offset "
17379                                      "support detection");
17380                 ret = 0;
17381         }
17382         if (actions[0])
17383                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
17384         if (matcher)
17385                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17386         if (tbl)
17387                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17388         if (dcs)
17389                 claim_zero(mlx5_devx_cmd_destroy(dcs));
17390         return ret;
17391 }
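
/*
 * A compiled-out sketch of how the probe above could select the counter
 * mode; the "fallback" local is an illustration, not the driver's actual
 * bookkeeping.
 */
#if 0
static void
mlx5_example_probe_counter_mode(struct rte_eth_dev *dev)
{
        /* A non-zero return means batch counters with offset do not work. */
        bool fallback = mlx5_flow_dv_discover_counter_offset_support(dev) != 0;

        if (fallback)
                DRV_LOG(INFO, "Using one counter object per flow counter.");
}
#endif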
17392
17393 /**
17394  * Query a devx counter.
17395  *
17396  * @param[in] dev
17397  *   Pointer to the Ethernet device structure.
17398  * @param[in] counter
17399  *   Index of the flow counter.
17400  * @param[in] clear
17401  *   Set to clear the counter statistics.
17402  * @param[out] pkts
17403  *   The statistics value of packets.
17404  * @param[out] bytes
17405  *   The statistics value of bytes.
17406  *
17407  * @return
17408  *   0 on success, -1 otherwise.
17409  */
17410 static int
17411 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
17412                       uint64_t *pkts, uint64_t *bytes)
17413 {
17414         struct mlx5_priv *priv = dev->data->dev_private;
17415         struct mlx5_flow_counter *cnt;
17416         uint64_t inn_pkts, inn_bytes;
17417         int ret;
17418
17419         if (!priv->sh->devx)
17420                 return -1;
17421
17422         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
17423         if (ret)
17424                 return -1;
17425         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
17426         *pkts = inn_pkts - cnt->hits;
17427         *bytes = inn_bytes - cnt->bytes;
17428         if (clear) {
17429                 cnt->hits = inn_pkts;
17430                 cnt->bytes = inn_bytes;
17431         }
17432         return 0;
17433 }
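
/*
 * An application-side, compiled-out sketch of the same query through the
 * generic rte_flow API. "port_id" and "flow" are assumed to exist, and
 * <stdio.h>/<inttypes.h> are assumed to be available; the "reset" field
 * maps to the "clear" argument above.
 */
#if 0
static void
mlx5_example_query_count(uint16_t port_id, struct rte_flow *flow)
{
        struct rte_flow_query_count qc = { .reset = 1 };
        const struct rte_flow_action act = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_error error;

        if (rte_flow_query(port_id, flow, &act, &qc, &error) == 0)
                printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
                       qc.hits, qc.bytes);
}
#endif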
17434
17435 /**
17436  * Get aged-out flows.
17437  *
17438  * @param[in] dev
17439  *   Pointer to the Ethernet device structure.
17440  * @param[in] context
17441  *   The address of an array of pointers to the aged-out flow contexts.
17442  * @param[in] nb_contexts
17443  *   The length of the context array.
17444  * @param[out] error
17445  *   Perform verbose error reporting if not NULL. Initialized in case of
17446  *   error only.
17447  *
17448  * @return
17449  *   The number of aged-out flows on success, otherwise a negative errno
17450  *   value. If nb_contexts is 0, return the total number of aged-out
17451  *   contexts; otherwise, return the number of aged-out flows reported
17452  *   in the context array.
17453  */
17455 static int
17456 flow_dv_get_aged_flows(struct rte_eth_dev *dev,
17457                     void **context,
17458                     uint32_t nb_contexts,
17459                     struct rte_flow_error *error)
17460 {
17461         struct mlx5_priv *priv = dev->data->dev_private;
17462         struct mlx5_age_info *age_info;
17463         struct mlx5_age_param *age_param;
17464         struct mlx5_flow_counter *counter;
17465         struct mlx5_aso_age_action *act;
17466         int nb_flows = 0;
17467
17468         if (nb_contexts && !context)
17469                 return rte_flow_error_set(error, EINVAL,
17470                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17471                                           NULL, "empty context");
17472         age_info = GET_PORT_AGE_INFO(priv);
17473         rte_spinlock_lock(&age_info->aged_sl);
17474         LIST_FOREACH(act, &age_info->aged_aso, next) {
17475                 nb_flows++;
17476                 if (nb_contexts) {
17477                         context[nb_flows - 1] =
17478                                                 act->age_params.context;
17479                         if (!(--nb_contexts))
17480                                 break;
17481                 }
17482         }
17483         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
17484                 nb_flows++;
17485                 if (nb_contexts) {
17486                         age_param = MLX5_CNT_TO_AGE(counter);
17487                         context[nb_flows - 1] = age_param->context;
17488                         if (!(--nb_contexts))
17489                                 break;
17490                 }
17491         }
17492         rte_spinlock_unlock(&age_info->aged_sl);
17493         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
17494         return nb_flows;
17495 }
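
/*
 * An application-side, compiled-out sketch of the two-call pattern the
 * return convention above enables (<stdlib.h> assumed): first ask for
 * the number of aged-out flows, then fetch their contexts.
 */
#if 0
static void
mlx5_example_drain_aged_flows(uint16_t port_id)
{
        struct rte_flow_error error;
        int n = rte_flow_get_aged_flows(port_id, NULL, 0, &error);

        if (n <= 0)
                return;
        void **contexts = calloc(n, sizeof(*contexts));

        if (contexts == NULL)
                return;
        n = rte_flow_get_aged_flows(port_id, contexts, n, &error);
        /* Destroy or recycle the flows behind contexts[0..n-1] here. */
        free(contexts);
}
#endif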
17496
17497 /*
17498  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
17499  */
17500 static uint32_t
17501 flow_dv_counter_allocate(struct rte_eth_dev *dev)
17502 {
17503         return flow_dv_counter_alloc(dev, 0);
17504 }
17505
17506 /**
17507  * Validate indirect action.
17508  * Dispatcher for action type specific validation.
17509  *
17510  * @param[in] dev
17511  *   Pointer to the Ethernet device structure.
17512  * @param[in] conf
17513  *   Indirect action configuration.
17514  * @param[in] action
17515  *   The indirect action object to validate.
17516  * @param[out] error
17517  *   Perform verbose error reporting if not NULL. Initialized in case of
17518  *   error only.
17519  *
17520  * @return
17521  *   0 on success, otherwise negative errno value.
17522  */
17523 static int
17524 flow_dv_action_validate(struct rte_eth_dev *dev,
17525                         const struct rte_flow_indir_action_conf *conf,
17526                         const struct rte_flow_action *action,
17527                         struct rte_flow_error *err)
17528 {
17529         struct mlx5_priv *priv = dev->data->dev_private;
17530
17531         RTE_SET_USED(conf);
17532         switch (action->type) {
17533         case RTE_FLOW_ACTION_TYPE_RSS:
17534                 /*
17535                  * priv->obj_ops is set according to driver capabilities:
17536                  * devx_obj_ops when the DevX capabilities are sufficient,
17537                  * ibv_obj_ops otherwise. ibv_obj_ops does not support the
17538                  * ind_table_modify operation, so the indirect RSS action
17539                  * cannot be used in that case.
17540                  */
17542                 if (priv->obj_ops.ind_table_modify == NULL)
17543                         return rte_flow_error_set
17544                                         (err, ENOTSUP,
17545                                          RTE_FLOW_ERROR_TYPE_ACTION,
17546                                          NULL,
17547                                          "Indirect RSS action not supported");
17548                 return mlx5_validate_action_rss(dev, action, err);
17549         case RTE_FLOW_ACTION_TYPE_AGE:
17550                 if (!priv->sh->aso_age_mng)
17551                         return rte_flow_error_set(err, ENOTSUP,
17552                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17553                                                 NULL,
17554                                                 "Indirect age action not supported");
17555                 return flow_dv_validate_action_age(0, action, dev, err);
17556         case RTE_FLOW_ACTION_TYPE_COUNT:
17557                 return flow_dv_validate_action_count(dev, true, 0, err);
17558         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
17559                 if (!priv->sh->ct_aso_en)
17560                         return rte_flow_error_set(err, ENOTSUP,
17561                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17562                                         "ASO CT is not supported");
17563                 return mlx5_validate_action_ct(dev, action->conf, err);
17564         default:
17565                 return rte_flow_error_set(err, ENOTSUP,
17566                                           RTE_FLOW_ERROR_TYPE_ACTION,
17567                                           NULL,
17568                                           "action type not supported");
17569         }
17570 }
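
/*
 * An application-side, compiled-out sketch of creating an indirect COUNT
 * action, which is routed through the dispatcher above ("port_id" is
 * assumed to be a valid port):
 */
#if 0
static struct rte_flow_action_handle *
mlx5_example_indirect_count(uint16_t port_id)
{
        const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_error error;

        /* NULL on failure; error.message then explains the reason. */
        return rte_flow_action_handle_create(port_id, &conf, &action, &error);
}
#endif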
17571
17572 /*
17573  * Check if the RSS configurations for the colors of a meter policy match
17574  * each other, except for the queues.
17575  *
17576  * @param[in] r1
17577  *   Pointer to the first RSS flow action.
17578  * @param[in] r2
17579  *   Pointer to the second RSS flow action.
17580  *
17581  * @return
17582  *   0 on match, 1 on conflict.
17583  */
17584 static inline int
17585 flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1,
17586                                const struct rte_flow_action_rss *r2)
17587 {
17588         if (r1 == NULL || r2 == NULL)
17589                 return 0;
17590         if (!(r1->level <= 1 && r2->level <= 1) &&
17591             !(r1->level > 1 && r2->level > 1))
17592                 return 1;
17593         if (r1->types != r2->types &&
17594             !((r1->types == 0 || r1->types == RTE_ETH_RSS_IP) &&
17595               (r2->types == 0 || r2->types == RTE_ETH_RSS_IP)))
17596                 return 1;
17597         if (r1->key || r2->key) {
17598                 const void *key1 = r1->key ? r1->key : rss_hash_default_key;
17599                 const void *key2 = r2->key ? r2->key : rss_hash_default_key;
17600
17601                 if (memcmp(key1, key2, MLX5_RSS_HASH_KEY_LEN))
17602                         return 1;
17603         }
17604         return 0;
17605 }
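
/*
 * A compiled-out illustration of two per-color RSS configurations that
 * the comparison above treats as matching: same level, types and
 * (default) key, different queue sets. The queue values are arbitrary
 * examples.
 */
#if 0
static void
mlx5_example_rss_compare(void)
{
        static const uint16_t queues_green[] = { 0, 1 };
        static const uint16_t queues_yellow[] = { 2, 3 };
        const struct rte_flow_action_rss rss_green = {
                .types = RTE_ETH_RSS_IP,
                .queue_num = RTE_DIM(queues_green),
                .queue = queues_green,
        };
        const struct rte_flow_action_rss rss_yellow = {
                .types = RTE_ETH_RSS_IP,
                .queue_num = RTE_DIM(queues_yellow),
                .queue = queues_yellow,
        };

        /* Expected to return 0: only the queues differ. */
        MLX5_ASSERT(!flow_dv_mtr_policy_rss_compare(&rss_green, &rss_yellow));
}
#endif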
17606
17607 /**
17608  * Validate the meter hierarchy chain for meter policy.
17609  *
17610  * @param[in] dev
17611  *   Pointer to the Ethernet device structure.
17612  * @param[in] meter_id
17613  *   Meter ID.
17614  * @param[in] action_flags
17615  *   Holds the actions detected until now.
17616  * @param[out] is_rss
17617  *   Whether the terminating policy in the hierarchy uses RSS.
17618  * @param[out] hierarchy_domain
17619  *   The domain bitmap for hierarchy policy.
17620  * @param[out] error
17621  *   Perform verbose error reporting if not NULL. Initialized in case of
17622  *   error only.
17623  *
17624  * @return
17625  *   0 on success, otherwise negative errno value with error set.
17626  */
17627 static int
17628 flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
17629                                   uint32_t meter_id,
17630                                   uint64_t action_flags,
17631                                   bool *is_rss,
17632                                   uint8_t *hierarchy_domain,
17633                                   struct rte_mtr_error *error)
17634 {
17635         struct mlx5_priv *priv = dev->data->dev_private;
17636         struct mlx5_flow_meter_info *fm;
17637         struct mlx5_flow_meter_policy *policy;
17638         uint8_t cnt = 1;
17639
17640         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
17641                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17642                 return -rte_mtr_error_set(error, EINVAL,
17643                                         RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
17644                                         NULL,
17645                                         "Multiple fate actions not supported.");
17646         *hierarchy_domain = 0;
17647         while (true) {
17648                 fm = mlx5_flow_meter_find(priv, meter_id, NULL);
17649                 if (!fm)
17650                         return -rte_mtr_error_set(error, EINVAL,
17651                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17652                                         "Meter not found in meter hierarchy.");
17653                 if (fm->def_policy)
17654                         return -rte_mtr_error_set(error, EINVAL,
17655                                         RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17656                         "Non termination meter not supported in hierarchy.");
17657                 policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17658                 MLX5_ASSERT(policy);
17659                 /*
17660                  * Only inherit the supported domains of the first meter in
17661                  * the hierarchy; every meter supports at least one domain.
17662                  */
17664                 if (!*hierarchy_domain) {
17665                         if (policy->transfer)
17666                                 *hierarchy_domain |=
17667                                                 MLX5_MTR_DOMAIN_TRANSFER_BIT;
17668                         if (policy->ingress)
17669                                 *hierarchy_domain |=
17670                                                 MLX5_MTR_DOMAIN_INGRESS_BIT;
17671                         if (policy->egress)
17672                                 *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
17673                 }
17674                 if (!policy->is_hierarchy) {
17675                         *is_rss = policy->is_rss;
17676                         break;
17677                 }
17678                 meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
17679                 if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
17680                         return -rte_mtr_error_set(error, EINVAL,
17681                                         RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
17682                                         "Exceed max hierarchy meter number.");
17683         }
17684         return 0;
17685 }
17686
17687 /**
17688  * Validate meter policy actions.
17689  * Dispatcher for action type specific validation.
17690  *
17691  * @param[in] dev
17692  *   Pointer to the Ethernet device structure.
17693  * @param[in] action
17694  *   The meter policy action object to validate.
17695  * @param[in] attr
17696  *   Attributes of flow to determine steering domain.
17697  * @param[out] error
17698  *   Perform verbose error reporting if not NULL. Initialized in case of
17699  *   error only.
17700  *
17701  * @return
17702  *   0 on success, otherwise negative errno value.
17703  */
17704 static int
17705 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
17706                         const struct rte_flow_action *actions[RTE_COLORS],
17707                         struct rte_flow_attr *attr,
17708                         bool *is_rss,
17709                         uint8_t *domain_bitmap,
17710                         uint8_t *policy_mode,
17711                         struct rte_mtr_error *error)
17712 {
17713         struct mlx5_priv *priv = dev->data->dev_private;
17714         struct mlx5_dev_config *dev_conf = &priv->config;
17715         const struct rte_flow_action *act;
17716         uint64_t action_flags[RTE_COLORS] = {0};
17717         int actions_n;
17718         int i, ret;
17719         struct rte_flow_error flow_err;
17720         uint8_t domain_color[RTE_COLORS] = {0};
17721         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
17722         uint8_t hierarchy_domain = 0;
17723         const struct rte_flow_action_meter *mtr;
17724         bool def_green = false;
17725         bool def_yellow = false;
17726         const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};
17727
17728         if (!priv->config.dv_esw_en)
17729                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17730         *domain_bitmap = def_domain;
17731         /* The red color supports only the DROP action. */
17732         if (!actions[RTE_COLOR_RED] ||
17733             actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
17734                 return -rte_mtr_error_set(error, ENOTSUP,
17735                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17736                                 NULL, "Red color only supports drop action.");
17737         /*
17738          * Check the default policy actions:
17739          * Green / Yellow: no action; Red: drop action.
17740          * An empty G or Y action list triggers the creation of the default
17741          * policy actions for that color.
17742          */
17742         if (!actions[RTE_COLOR_GREEN] ||
17743             actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)
17744                 def_green = true;
17745         if (!actions[RTE_COLOR_YELLOW] ||
17746             actions[RTE_COLOR_YELLOW]->type == RTE_FLOW_ACTION_TYPE_END)
17747                 def_yellow = true;
17748         if (def_green && def_yellow) {
17749                 *policy_mode = MLX5_MTR_POLICY_MODE_DEF;
17750                 return 0;
17751         } else if (!def_green && def_yellow) {
17752                 *policy_mode = MLX5_MTR_POLICY_MODE_OG;
17753         } else if (def_green && !def_yellow) {
17754                 *policy_mode = MLX5_MTR_POLICY_MODE_OY;
17755         } else {
17756                 *policy_mode = MLX5_MTR_POLICY_MODE_ALL;
17757         }
17758         /* Set to an empty string to avoid NULL pointer access by the user. */
17759         flow_err.message = "";
17760         for (i = 0; i < RTE_COLORS; i++) {
17761                 act = actions[i];
17762                 for (action_flags[i] = 0, actions_n = 0;
17763                      act && act->type != RTE_FLOW_ACTION_TYPE_END;
17764                      act++) {
17765                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
17766                                 return -rte_mtr_error_set(error, ENOTSUP,
17767                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17768                                           NULL, "too many actions");
17769                         switch (act->type) {
17770                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
17771                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
17772                                 if (!priv->config.dv_esw_en)
17773                                         return -rte_mtr_error_set(error,
17774                                         ENOTSUP,
17775                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17776                                         NULL, "PORT action validate check"
17777                                         " fail for ESW disable");
17778                                 ret = flow_dv_validate_action_port_id(dev,
17779                                                 action_flags[i],
17780                                                 act, attr, &flow_err);
17781                                 if (ret)
17782                                         return -rte_mtr_error_set(error,
17783                                         ENOTSUP,
17784                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17785                                         NULL, flow_err.message ?
17786                                         flow_err.message :
17787                                         "PORT action validate check fail");
17788                                 ++actions_n;
17789                                 action_flags[i] |= MLX5_FLOW_ACTION_PORT_ID;
17790                                 break;
17791                         case RTE_FLOW_ACTION_TYPE_MARK:
17792                                 ret = flow_dv_validate_action_mark(dev, act,
17793                                                            action_flags[i],
17794                                                            attr, &flow_err);
17795                                 if (ret < 0)
17796                                         return -rte_mtr_error_set(error,
17797                                         ENOTSUP,
17798                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17799                                         NULL, flow_err.message ?
17800                                         flow_err.message :
17801                                         "Mark action validate check fail");
17802                                 if (dev_conf->dv_xmeta_en !=
17803                                         MLX5_XMETA_MODE_LEGACY)
17804                                         return -rte_mtr_error_set(error,
17805                                         ENOTSUP,
17806                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17807                                         NULL, "Extend MARK action is "
17808                                         "not supported. Please try use "
17809                                         "default policy for meter.");
17810                                 action_flags[i] |= MLX5_FLOW_ACTION_MARK;
17811                                 ++actions_n;
17812                                 break;
17813                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
17814                                 ret = flow_dv_validate_action_set_tag(dev,
17815                                                         act, action_flags[i],
17816                                                         attr, &flow_err);
17817                                 if (ret)
17818                                         return -rte_mtr_error_set(error,
17819                                         ENOTSUP,
17820                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17821                                         NULL, flow_err.message ?
17822                                         flow_err.message :
17823                                         "Set tag action validate check fail");
17824                                 action_flags[i] |= MLX5_FLOW_ACTION_SET_TAG;
17825                                 ++actions_n;
17826                                 break;
17827                         case RTE_FLOW_ACTION_TYPE_DROP:
17828                                 ret = mlx5_flow_validate_action_drop
17829                                         (action_flags[i], attr, &flow_err);
17830                                 if (ret < 0)
17831                                         return -rte_mtr_error_set(error,
17832                                         ENOTSUP,
17833                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17834                                         NULL, flow_err.message ?
17835                                         flow_err.message :
17836                                         "Drop action validate check fail");
17837                                 action_flags[i] |= MLX5_FLOW_ACTION_DROP;
17838                                 ++actions_n;
17839                                 break;
17840                         case RTE_FLOW_ACTION_TYPE_QUEUE:
17841                                 /*
17842                                  * Check whether the extensive metadata
17843                                  * feature is engaged.
17844                                  */
17845                                 if (dev_conf->dv_flow_en &&
17846                                     (dev_conf->dv_xmeta_en !=
17847                                      MLX5_XMETA_MODE_LEGACY) &&
17848                                     mlx5_flow_ext_mreg_supported(dev))
17849                                         return -rte_mtr_error_set(error,
17850                                           ENOTSUP,
17851                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17852                                           NULL, "Queue action with meta "
17853                                           "is not supported. Please try use "
17854                                           "default policy for meter.");
17855                                 ret = mlx5_flow_validate_action_queue(act,
17856                                                         action_flags[i], dev,
17857                                                         attr, &flow_err);
17858                                 if (ret < 0)
17859                                         return -rte_mtr_error_set(error,
17860                                           ENOTSUP,
17861                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17862                                           NULL, flow_err.message ?
17863                                           flow_err.message :
17864                                           "Queue action validate check fail");
17865                                 action_flags[i] |= MLX5_FLOW_ACTION_QUEUE;
17866                                 ++actions_n;
17867                                 break;
17868                         case RTE_FLOW_ACTION_TYPE_RSS:
17869                                 if (dev_conf->dv_flow_en &&
17870                                     (dev_conf->dv_xmeta_en !=
17871                                      MLX5_XMETA_MODE_LEGACY) &&
17872                                     mlx5_flow_ext_mreg_supported(dev))
17873                                         return -rte_mtr_error_set(error,
17874                                           ENOTSUP,
17875                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17876                                           NULL, "RSS action with meta "
17877                                           "is not supported. Please try use "
17878                                           "default policy for meter.");
17879                                 ret = mlx5_validate_action_rss(dev, act,
17880                                                                &flow_err);
17881                                 if (ret < 0)
17882                                         return -rte_mtr_error_set(error,
17883                                           ENOTSUP,
17884                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17885                                           NULL, flow_err.message ?
17886                                           flow_err.message :
17887                                           "RSS action validate check fail");
17888                                 action_flags[i] |= MLX5_FLOW_ACTION_RSS;
17889                                 ++actions_n;
17890                                 /* Either G or Y will set the RSS. */
17891                                 rss_color[i] = act->conf;
17892                                 break;
17893                         case RTE_FLOW_ACTION_TYPE_JUMP:
17894                                 ret = flow_dv_validate_action_jump(dev,
17895                                         NULL, act, action_flags[i],
17896                                         attr, true, &flow_err);
17897                                 if (ret)
17898                                         return -rte_mtr_error_set(error,
17899                                           ENOTSUP,
17900                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17901                                           NULL, flow_err.message ?
17902                                           flow_err.message :
17903                                           "Jump action validate check fail");
17904                                 ++actions_n;
17905                                 action_flags[i] |= MLX5_FLOW_ACTION_JUMP;
17906                                 break;
17907                         /*
17908                          * Only the last meter in the hierarchy supports
17909                          * YELLOW color steering, so no other meter may
17910                          * appear in this meter policy's actions list.
17911                          */
17912                         case RTE_FLOW_ACTION_TYPE_METER:
17913                                 if (i != RTE_COLOR_GREEN)
17914                                         return -rte_mtr_error_set(error,
17915                                                 ENOTSUP,
17916                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17917                                                 NULL,
17918                                                 "Meter hierarchy only supports GREEN color.");
17919                                 if (*policy_mode != MLX5_MTR_POLICY_MODE_OG)
17920                                         return -rte_mtr_error_set(error,
17921                                                 ENOTSUP,
17922                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17923                                                 NULL,
17924                                                 "No yellow policy should be provided in meter hierarchy.");
17925                                 mtr = act->conf;
17926                                 ret = flow_dv_validate_policy_mtr_hierarchy(dev,
17927                                                         mtr->mtr_id,
17928                                                         action_flags[i],
17929                                                         is_rss,
17930                                                         &hierarchy_domain,
17931                                                         error);
17932                                 if (ret)
17933                                         return ret;
17934                                 ++actions_n;
17935                                 action_flags[i] |=
17936                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
17937                                 break;
17938                         default:
17939                                 return -rte_mtr_error_set(error, ENOTSUP,
17940                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17941                                         NULL,
17942                                         "Doesn't support optional action");
17943                         }
17944                 }
17945                 if (action_flags[i] & MLX5_FLOW_ACTION_PORT_ID) {
17946                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
17947                 } else if ((action_flags[i] &
17948                           (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
17949                           (action_flags[i] & MLX5_FLOW_ACTION_MARK)) {
17950                         /*
17951                          * Only MLX5_XMETA_MODE_LEGACY is supported here,
17952                          * so the MARK action is ingress-only.
17953                          */
17954                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
17955                 } else {
17956                         domain_color[i] = def_domain;
17957                         if (action_flags[i] &&
17958                             !(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17959                                 domain_color[i] &=
17960                                 ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17961                 }
17962                 if (action_flags[i] &
17963                     MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
17964                         domain_color[i] &= hierarchy_domain;
17965                 /*
17966                  * Non-termination actions only support the NIC Tx domain.
17967                  * The adjustment should be skipped when there is no action
17968                  * or only END is provided; the default domain bit-mask is
17969                  * kept to find the minimal intersection. The action flags
17970                  * checking should also be skipped then.
17971                  */
17972                 if ((def_green && i == RTE_COLOR_GREEN) ||
17973                     (def_yellow && i == RTE_COLOR_YELLOW))
17974                         continue;
17975                 /*
17976                  * Validate the mutual exclusion of the drop action with
17977                  * other actions: drop is mutually exclusive with any
17978                  * other action, except for the count action.
17979                  */
17980                 if ((action_flags[i] & MLX5_FLOW_ACTION_DROP) &&
17981                     (action_flags[i] & ~MLX5_FLOW_ACTION_DROP)) {
17982                         return -rte_mtr_error_set(error, ENOTSUP,
17983                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17984                                 NULL, "Drop action is mutually-exclusive "
17985                                 "with any other action");
17986                 }
17987                 /* E-Switch has a few restrictions on items and actions. */
17988                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
17989                         if (!mlx5_flow_ext_mreg_supported(dev) &&
17990                             action_flags[i] & MLX5_FLOW_ACTION_MARK)
17991                                 return -rte_mtr_error_set(error, ENOTSUP,
17992                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17993                                         NULL, "unsupported action MARK");
17994                         if (action_flags[i] & MLX5_FLOW_ACTION_QUEUE)
17995                                 return -rte_mtr_error_set(error, ENOTSUP,
17996                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17997                                         NULL, "unsupported action QUEUE");
17998                         if (action_flags[i] & MLX5_FLOW_ACTION_RSS)
17999                                 return -rte_mtr_error_set(error, ENOTSUP,
18000                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18001                                         NULL, "unsupported action RSS");
18002                         if (!(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18003                                 return -rte_mtr_error_set(error, ENOTSUP,
18004                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18005                                         NULL, "no fate action is found");
18006                 } else {
18007                         if (!(action_flags[i] & MLX5_FLOW_FATE_ACTIONS) &&
18008                             (domain_color[i] & MLX5_MTR_DOMAIN_INGRESS_BIT)) {
18009                                 if ((domain_color[i] &
18010                                      MLX5_MTR_DOMAIN_EGRESS_BIT))
18011                                         domain_color[i] =
18012                                                 MLX5_MTR_DOMAIN_EGRESS_BIT;
18013                                 else
18014                                         return -rte_mtr_error_set(error,
18015                                                 ENOTSUP,
18016                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18017                                                 NULL,
18018                                                 "no fate action is found");
18019                         }
18020                 }
18021         }
18022         /* If both colors have RSS, the attributes should be the same. */
18023         if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN],
18024                                            rss_color[RTE_COLOR_YELLOW]))
18025                 return -rte_mtr_error_set(error, EINVAL,
18026                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18027                                           NULL, "policy RSS attr conflict");
18028         if (rss_color[RTE_COLOR_GREEN] || rss_color[RTE_COLOR_YELLOW])
18029                 *is_rss = true;
18030         /* "domain_color[C]" is non-zero for each color, default is ALL. */
18031         if (!def_green && !def_yellow &&
18032             domain_color[RTE_COLOR_GREEN] != domain_color[RTE_COLOR_YELLOW] &&
18033             !(action_flags[RTE_COLOR_GREEN] & MLX5_FLOW_ACTION_DROP) &&
18034             !(action_flags[RTE_COLOR_YELLOW] & MLX5_FLOW_ACTION_DROP))
18035                 return -rte_mtr_error_set(error, EINVAL,
18036                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18037                                           NULL, "policy domains conflict");
18038         /*
18039          * At least one color policy is listed in the actions; the supported
18040          * domains are the intersection of the per-color domains.
18041          */
18042         *domain_bitmap = domain_color[RTE_COLOR_GREEN] &
18043                          domain_color[RTE_COLOR_YELLOW];
18044         return 0;
18045 }
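
/*
 * An application-side, compiled-out sketch of a meter policy layout the
 * validator above accepts ("port_id" and the policy ID are assumptions):
 * GREEN steers to a queue, YELLOW stays default, RED must drop.
 */
#if 0
static int
mlx5_example_policy_add(uint16_t port_id)
{
        const struct rte_flow_action_queue green_queue = { .index = 0 };
        const struct rte_flow_action green_acts[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &green_queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        const struct rte_flow_action red_acts[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_mtr_meter_policy_params params = {
                .actions[RTE_COLOR_GREEN] = green_acts,
                .actions[RTE_COLOR_YELLOW] = NULL,
                .actions[RTE_COLOR_RED] = red_acts,
        };
        struct rte_mtr_error error;

        return rte_mtr_meter_policy_add(port_id, /* policy_id */ 1,
                                        &params, &error);
}
#endif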
18046
18047 static int
18048 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
18049 {
18050         struct mlx5_priv *priv = dev->data->dev_private;
18051         int ret = 0;
18052
18053         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
18054                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
18055                                                 flags);
18056                 if (ret != 0)
18057                         return ret;
18058         }
18059         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
18060                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
18061                 if (ret != 0)
18062                         return ret;
18063         }
18064         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
18065                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
18066                 if (ret != 0)
18067                         return ret;
18068         }
18069         return 0;
18070 }
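
/*
 * An application-side, compiled-out sketch (assuming a valid "port_id"):
 * flushing the Rx and FDB domains through the PMD-specific API, which
 * lands in flow_dv_sync_domain().
 */
#if 0
static void
mlx5_example_sync(uint16_t port_id)
{
        if (rte_pmd_mlx5_sync_flow(port_id, MLX5_DOMAIN_BIT_NIC_RX |
                                            MLX5_DOMAIN_BIT_FDB) != 0)
                DRV_LOG(ERR, "Flow rule synchronization failed.");
}
#endif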
18071
18072 /**
18073  * Discover the number of available flow priorities
18074  * by trying to create a flow with the highest priority value
18075  * for each possible number.
18076  *
18077  * @param[in] dev
18078  *   Ethernet device.
18079  * @param[in] vprio
18080  *   List of candidate numbers of available priorities.
18081  * @param[in] vprio_n
18082  *   Size of @p vprio array.
18083  * @return
18084  *   On success, number of available flow priorities.
18085  *   On failure, a negative errno-style code and rte_errno is set.
18086  */
18087 static int
18088 flow_dv_discover_priorities(struct rte_eth_dev *dev,
18089                             const uint16_t *vprio, int vprio_n)
18090 {
18091         struct mlx5_priv *priv = dev->data->dev_private;
18092         struct mlx5_indexed_pool *pool = priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW];
18093         struct rte_flow_item_eth eth;
18094         struct rte_flow_item item = {
18095                 .type = RTE_FLOW_ITEM_TYPE_ETH,
18096                 .spec = &eth,
18097                 .mask = &eth,
18098         };
18099         struct mlx5_flow_dv_matcher matcher = {
18100                 .mask = {
18101                         .size = sizeof(matcher.mask.buf),
18102                 },
18103         };
18104         union mlx5_flow_tbl_key tbl_key;
18105         struct mlx5_flow flow;
18106         void *action;
18107         struct rte_flow_error error;
18108         uint8_t misc_mask;
18109         int i, err, ret = -ENOTSUP;
18110
18111         /*
18112          * Prepare a flow with a catch-all pattern and a drop action.
18113          * Use the drop queue; the shared drop action may be unavailable.
18114          */
18115         action = priv->drop_queue.hrxq->action;
18116         if (action == NULL) {
18117                 DRV_LOG(ERR, "Priority discovery requires a drop action");
18118                 rte_errno = ENOTSUP;
18119                 return -rte_errno;
18120         }
18121         memset(&flow, 0, sizeof(flow));
18122         flow.handle = mlx5_ipool_zmalloc(pool, &flow.handle_idx);
18123         if (flow.handle == NULL) {
18124                 DRV_LOG(ERR, "Cannot create flow handle");
18125                 rte_errno = ENOMEM;
18126                 return -rte_errno;
18127         }
18128         flow.ingress = true;
18129         flow.dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
18130         flow.dv.actions[0] = action;
18131         flow.dv.actions_n = 1;
18132         memset(&eth, 0, sizeof(eth));
18133         flow_dv_translate_item_eth(matcher.mask.buf, flow.dv.value.buf,
18134                                    &item, /* inner */ false, /* group */ 0);
18135         matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);
18136         for (i = 0; i < vprio_n; i++) {
18137                 /* Configure the next proposed maximum priority. */
18138                 matcher.priority = vprio[i] - 1;
18139                 memset(&tbl_key, 0, sizeof(tbl_key));
18140                 err = flow_dv_matcher_register(dev, &matcher, &tbl_key, &flow,
18141                                                /* tunnel */ NULL,
18142                                                /* group */ 0,
18143                                                &error);
18144                 if (err != 0) {
18145                         /* This action is pure SW and must always succeed. */
18146                         DRV_LOG(ERR, "Cannot register matcher");
18147                         ret = -rte_errno;
18148                         break;
18149                 }
18150                 /* Try to apply the flow to HW. */
18151                 misc_mask = flow_dv_matcher_enable(flow.dv.value.buf);
18152                 __flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask);
18153                 err = mlx5_flow_os_create_flow
18154                                 (flow.handle->dvh.matcher->matcher_object,
18155                                  (void *)&flow.dv.value, flow.dv.actions_n,
18156                                  flow.dv.actions, &flow.handle->drv_flow);
18157                 if (err == 0) {
18158                         claim_zero(mlx5_flow_os_destroy_flow
18159                                                 (flow.handle->drv_flow));
18160                         flow.handle->drv_flow = NULL;
18161                 }
18162                 claim_zero(flow_dv_matcher_release(dev, flow.handle));
18163                 if (err != 0)
18164                         break;
18165                 ret = vprio[i];
18166         }
18167         mlx5_ipool_free(pool, flow.handle_idx);
18168         /* Set rte_errno if no expected priority value matched. */
18169         if (ret < 0)
18170                 rte_errno = -ret;
18171         return ret;
18172 }
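
/*
 * A compiled-out caller-side sketch for the discovery loop above; the
 * 8/16 candidate list mirrors the values typically probed by the PMD,
 * but treat it as an assumption here.
 */
#if 0
static void
mlx5_example_discover_priorities(struct rte_eth_dev *dev)
{
        static const uint16_t vprio[] = { 8, 16 };
        int n = flow_dv_discover_priorities(dev, vprio, RTE_DIM(vprio));

        if (n < 0)
                DRV_LOG(ERR, "Cannot discover supported flow priorities.");
        else
                DRV_LOG(DEBUG, "Supported flow priorities: %d.", n);
}
#endif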
18173
18174 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
18175         .validate = flow_dv_validate,
18176         .prepare = flow_dv_prepare,
18177         .translate = flow_dv_translate,
18178         .apply = flow_dv_apply,
18179         .remove = flow_dv_remove,
18180         .destroy = flow_dv_destroy,
18181         .query = flow_dv_query,
18182         .create_mtr_tbls = flow_dv_create_mtr_tbls,
18183         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
18184         .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
18185         .create_meter = flow_dv_mtr_alloc,
18186         .free_meter = flow_dv_aso_mtr_release_to_pool,
18187         .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
18188         .create_mtr_acts = flow_dv_create_mtr_policy_acts,
18189         .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
18190         .create_policy_rules = flow_dv_create_policy_rules,
18191         .destroy_policy_rules = flow_dv_destroy_policy_rules,
18192         .create_def_policy = flow_dv_create_def_policy,
18193         .destroy_def_policy = flow_dv_destroy_def_policy,
18194         .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
18195         .meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
18196         .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
18197         .counter_alloc = flow_dv_counter_allocate,
18198         .counter_free = flow_dv_counter_free,
18199         .counter_query = flow_dv_counter_query,
18200         .get_aged_flows = flow_dv_get_aged_flows,
18201         .action_validate = flow_dv_action_validate,
18202         .action_create = flow_dv_action_create,
18203         .action_destroy = flow_dv_action_destroy,
18204         .action_update = flow_dv_action_update,
18205         .action_query = flow_dv_action_query,
18206         .sync_domain = flow_dv_sync_domain,
18207         .discover_priorities = flow_dv_discover_priorities,
18208         .item_create = flow_dv_item_create,
18209         .item_release = flow_dv_item_release,
18210 };
18211
18212 #endif /* HAVE_IBV_FLOW_DV_SUPPORT || !HAVE_INFINIBAND_VERBS_H */
18213