/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>
#include <rte_tailq.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
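/*
 * The 802.1Q TCI layout behind these masks is |PCP:3|DEI:1|VID:12|,
 * e.g. PCP = 5 and VID = 100 give TCI = (5 << 13) | 100 = 0xa064.
 */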

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
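        /* 'attr' overlays the bit-fields above: attr = 0 clears them all. */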
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

static int16_t
flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (priv->pci_dev == NULL)
                return 0;
        switch (priv->pci_dev->id.device_id) {
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
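                /* 0xfffe is the E-Switch manager (ECPF) vport number. */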
                return (int16_t)0xfffe;
        default:
                return 0;
        }
}

/**
 * Initialize flow attributes structure according to the flow items' types.
 *
 * flow_dv_validate() rejects multiple L3/L4 layers except in tunnel mode.
 * In tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is the suffix
         * flow and the layer flags were set by the prefix flow. Use the
         * prefix flow's layer flags, as the suffix flow may not carry
         * the user-defined items after the flow has been split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static inline int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size here is in bits, not bytes! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

static inline struct mlx5_hlist *
flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
                     const char *name, uint32_t size, bool direct_key,
                     bool lcores_share, void *ctx,
                     mlx5_list_create_cb cb_create,
                     mlx5_list_match_cb cb_match,
                     mlx5_list_remove_cb cb_remove,
                     mlx5_list_clone_cb cb_clone,
                     mlx5_list_clone_free_cb cb_clone_free)
{
        struct mlx5_hlist *hl;
        struct mlx5_hlist *expected = NULL;
        char s[MLX5_NAME_SIZE];

        hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        if (likely(hl))
                return hl;
        snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
        hl = mlx5_hlist_create(s, size, direct_key, lcores_share,
                        ctx, cb_create, cb_match, cb_remove, cb_clone,
                        cb_clone_free);
        if (!hl) {
                DRV_LOG(ERR, "%s hash creation failed", name);
                rte_errno = ENOMEM;
                return NULL;
        }
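        /*
         * Publish the new list atomically; if another thread won the race,
         * destroy our copy and use the list it installed.
         */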
        if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
                                         __ATOMIC_SEQ_CST,
                                         __ATOMIC_SEQ_CST)) {
                mlx5_hlist_destroy(hl);
                hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        }
        return hl;
}

/**
 * Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
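 *
 *   E.g. data = {0x12, 0x34, 0x56} with size 3 yields 0x123456.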
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask. The data bit offset and width of each
 * action are determined by the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t carry_b = 0;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should likewise be presented in big-endian format.
         * A mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                uint32_t size_b;
                uint32_t off_b;
                uint32_t mask;
                uint32_t data;
                bool next_field = true;
                bool next_dcopy = true;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask) + carry_b;
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
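                /*
                 * E.g. mask == 0x00ffff00 (with carry_b == 0) gives
                 * off_b == 8 and size_b == 32 - 8 - 8 == 16.
                 */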
                MLX5_ASSERT(size_b);
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
                                0 : size_b,
                };
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        /*
                         * Destination field overflow. Copy leftovers of
                         * a source field to the next destination field.
                         */
                        carry_b = 0;
                        if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
                            dcopy->size != 0) {
                                actions[i].length =
                                        dcopy->size * CHAR_BIT - dcopy->offset;
                                carry_b = actions[i].length;
                                next_field = false;
                        }
                        /*
                         * Not enough bits in a source field to fill a
                         * destination field. Switch to the next source.
                         */
                        if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
                            (size_b == field->size * CHAR_BIT - off_b)) {
                                actions[i].length =
                                        field->size * CHAR_BIT - off_b;
                                dcopy->offset += actions[i].length;
                                next_dcopy = false;
                        }
                        if (next_dcopy)
                                ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Keep masked bits and shift down by the field offset. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (next_field)
                        ++field;
                ++i;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X with the increment
                 * operation, add UINT32_MAX to Y X times: each addition
                 * of UINT32_MAX decrements Y by 1 modulo 2^32.
                 */
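                /*
                 * E.g. X == 2: value = 2 * UINT32_MAX == 0x1fffffffe;
                 * truncated to 32 bits it is 0xfffffffe, and adding it
                 * gives Y + 0xfffffffe == Y - 2 (mod 2^32).
                 */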
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X with the increment
                 * operation, add UINT32_MAX to Y X times: each addition
                 * of UINT32_MAX decrements Y by 1 modulo 2^32.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
                .offset = conf->offset,
                .length = conf->length,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
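        /*
         * E.g. with dv_regc0_mask == 0xffff0000 a copy into REG_C_0
         * starts at bit offset 16 with mask 0x0000ffff.
         */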
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        mask = rte_cpu_to_be_32(reg_c0 >> reg_dst.offset);
                } else {
                        reg_dst.offset = 0;
                        mask = rte_cpu_to_be_32(reg_c0);
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * only in extensive metadata mode and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

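                /*
                 * Shift the mark into the reg_c[0] sub-field; the double
                 * byte-swap performs the shift in CPU byte order.
                 */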
                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t mask = rte_cpu_to_be_32(conf->mask);
        uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1371         /*
1372          * Even though the DSCP bits offset in IPv6 is not byte aligned,
1373          * rdma-core only accepts the DSCP bits byte aligned in bits 0 to 5,
1374          * to be compatible with IPv4. There is no need to shift the bits
1375          * in the IPv6 case, as rdma-core requires a byte-aligned value.
1376          */
1377         ipv6.hdr.vtc_flow = conf->dscp;
1378         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1379         item.spec = &ipv6;
1380         item.mask = &ipv6_mask;
1381         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1382                                              MLX5_MODIFICATION_TYPE_SET, error);
1383 }
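
/*
 * Note on the mask arithmetic above: RTE_IPV6_HDR_DSCP_MASK covers
 * bits 22-27 of vtc_flow (0x0fc00000), so shifting it right by 22 again
 * yields the byte-aligned six-bit mask 0x3f consumed by rdma-core.
 */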
1384
1385 static int
1386 mlx5_flow_item_field_width(struct rte_eth_dev *dev,
1387                            enum rte_flow_field_id field, int inherit,
1388                            const struct rte_flow_attr *attr,
1389                            struct rte_flow_error *error)
1390 {
1391         struct mlx5_priv *priv = dev->data->dev_private;
1392
1393         switch (field) {
1394         case RTE_FLOW_FIELD_START:
1395                 return 32;
1396         case RTE_FLOW_FIELD_MAC_DST:
1397         case RTE_FLOW_FIELD_MAC_SRC:
1398                 return 48;
1399         case RTE_FLOW_FIELD_VLAN_TYPE:
1400                 return 16;
1401         case RTE_FLOW_FIELD_VLAN_ID:
1402                 return 12;
1403         case RTE_FLOW_FIELD_MAC_TYPE:
1404                 return 16;
1405         case RTE_FLOW_FIELD_IPV4_DSCP:
1406                 return 6;
1407         case RTE_FLOW_FIELD_IPV4_TTL:
1408                 return 8;
1409         case RTE_FLOW_FIELD_IPV4_SRC:
1410         case RTE_FLOW_FIELD_IPV4_DST:
1411                 return 32;
1412         case RTE_FLOW_FIELD_IPV6_DSCP:
1413                 return 6;
1414         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1415                 return 8;
1416         case RTE_FLOW_FIELD_IPV6_SRC:
1417         case RTE_FLOW_FIELD_IPV6_DST:
1418                 return 128;
1419         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1420         case RTE_FLOW_FIELD_TCP_PORT_DST:
1421                 return 16;
1422         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1423         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1424                 return 32;
1425         case RTE_FLOW_FIELD_TCP_FLAGS:
1426                 return 9;
1427         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1428         case RTE_FLOW_FIELD_UDP_PORT_DST:
1429                 return 16;
1430         case RTE_FLOW_FIELD_VXLAN_VNI:
1431         case RTE_FLOW_FIELD_GENEVE_VNI:
1432                 return 24;
1433         case RTE_FLOW_FIELD_GTP_TEID:
1434         case RTE_FLOW_FIELD_TAG:
1435                 return 32;
1436         case RTE_FLOW_FIELD_MARK:
1437                 return __builtin_popcount(priv->sh->dv_mark_mask);
1438         case RTE_FLOW_FIELD_META:
1439                 return (flow_dv_get_metadata_reg(dev, attr, error) == REG_C_0) ?
1440                         __builtin_popcount(priv->sh->dv_meta_mask) : 32;
1441         case RTE_FLOW_FIELD_POINTER:
1442         case RTE_FLOW_FIELD_VALUE:
1443                 return inherit < 0 ? 0 : inherit;
1444         default:
1445                 MLX5_ASSERT(false);
1446         }
1447         return 0;
1448 }
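
/*
 * The returned width is the number of bits an action may modify in the
 * given field, e.g. 12 for a VLAN ID and 9 for the TCP flags (which
 * include the NS bit). For MARK and META the width depends on how many
 * bits dv_mark_mask/dv_meta_mask leave available on the device, hence
 * the popcount above.
 */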
1449
1450 static void
1451 mlx5_flow_field_id_to_modify_info
1452                 (const struct rte_flow_action_modify_data *data,
1453                  struct field_modify_info *info, uint32_t *mask,
1454                  uint32_t width, uint32_t *shift, struct rte_eth_dev *dev,
1455                  const struct rte_flow_attr *attr, struct rte_flow_error *error)
1456 {
1457         struct mlx5_priv *priv = dev->data->dev_private;
1458         uint32_t idx = 0;
1459         uint32_t off = 0;
1460
1461         switch (data->field) {
1462         case RTE_FLOW_FIELD_START:
1463                 /* not supported yet */
1464                 MLX5_ASSERT(false);
1465                 break;
1466         case RTE_FLOW_FIELD_MAC_DST:
1467                 off = data->offset > 16 ? data->offset - 16 : 0;
1468                 if (mask) {
1469                         if (data->offset < 16) {
1470                                 info[idx] = (struct field_modify_info){2, 4,
1471                                                 MLX5_MODI_OUT_DMAC_15_0};
1472                                 if (width < 16) {
1473                                         mask[1] = rte_cpu_to_be_16(0xffff >>
1474                                                                  (16 - width));
1475                                         width = 0;
1476                                 } else {
1477                                         mask[1] = RTE_BE16(0xffff);
1478                                         width -= 16;
1479                                 }
1480                                 if (!width)
1481                                         break;
1482                                 ++idx;
1483                         }
1484                         info[idx] = (struct field_modify_info){4, 0,
1485                                                 MLX5_MODI_OUT_DMAC_47_16};
1486                         mask[0] = rte_cpu_to_be_32((0xffffffff >>
1487                                                     (32 - width)) << off);
1488                 } else {
1489                         if (data->offset < 16)
1490                                 info[idx++] = (struct field_modify_info){2, 0,
1491                                                 MLX5_MODI_OUT_DMAC_15_0};
1492                         info[idx] = (struct field_modify_info){4, off,
1493                                                 MLX5_MODI_OUT_DMAC_47_16};
1494                 }
1495                 break;
1496         case RTE_FLOW_FIELD_MAC_SRC:
1497                 off = data->offset > 16 ? data->offset - 16 : 0;
1498                 if (mask) {
1499                         if (data->offset < 16) {
1500                                 info[idx] = (struct field_modify_info){2, 4,
1501                                                 MLX5_MODI_OUT_SMAC_15_0};
1502                                 if (width < 16) {
1503                                         mask[1] = rte_cpu_to_be_16(0xffff >>
1504                                                                  (16 - width));
1505                                         width = 0;
1506                                 } else {
1507                                         mask[1] = RTE_BE16(0xffff);
1508                                         width -= 16;
1509                                 }
1510                                 if (!width)
1511                                         break;
1512                                 ++idx;
1513                         }
1514                         info[idx] = (struct field_modify_info){4, 0,
1515                                                 MLX5_MODI_OUT_SMAC_47_16};
1516                         mask[0] = rte_cpu_to_be_32((0xffffffff >>
1517                                                     (32 - width)) << off);
1518                 } else {
1519                         if (data->offset < 16)
1520                                 info[idx++] = (struct field_modify_info){2, 0,
1521                                                 MLX5_MODI_OUT_SMAC_15_0};
1522                         info[idx] = (struct field_modify_info){4, off,
1523                                                 MLX5_MODI_OUT_SMAC_47_16};
1524                 }
1525                 break;
1526         case RTE_FLOW_FIELD_VLAN_TYPE:
1527                 /* not supported yet */
1528                 break;
1529         case RTE_FLOW_FIELD_VLAN_ID:
1530                 info[idx] = (struct field_modify_info){2, 0,
1531                                         MLX5_MODI_OUT_FIRST_VID};
1532                 if (mask)
1533                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1534                 break;
1535         case RTE_FLOW_FIELD_MAC_TYPE:
1536                 info[idx] = (struct field_modify_info){2, 0,
1537                                         MLX5_MODI_OUT_ETHERTYPE};
1538                 if (mask)
1539                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1540                 break;
1541         case RTE_FLOW_FIELD_IPV4_DSCP:
1542                 info[idx] = (struct field_modify_info){1, 0,
1543                                         MLX5_MODI_OUT_IP_DSCP};
1544                 if (mask)
1545                         mask[idx] = 0x3f >> (6 - width);
1546                 break;
1547         case RTE_FLOW_FIELD_IPV4_TTL:
1548                 info[idx] = (struct field_modify_info){1, 0,
1549                                         MLX5_MODI_OUT_IPV4_TTL};
1550                 if (mask)
1551                         mask[idx] = 0xff >> (8 - width);
1552                 break;
1553         case RTE_FLOW_FIELD_IPV4_SRC:
1554                 info[idx] = (struct field_modify_info){4, 0,
1555                                         MLX5_MODI_OUT_SIPV4};
1556                 if (mask)
1557                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1558                                                      (32 - width));
1559                 break;
1560         case RTE_FLOW_FIELD_IPV4_DST:
1561                 info[idx] = (struct field_modify_info){4, 0,
1562                                         MLX5_MODI_OUT_DIPV4};
1563                 if (mask)
1564                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1565                                                      (32 - width));
1566                 break;
1567         case RTE_FLOW_FIELD_IPV6_DSCP:
1568                 info[idx] = (struct field_modify_info){1, 0,
1569                                         MLX5_MODI_OUT_IP_DSCP};
1570                 if (mask)
1571                         mask[idx] = 0x3f >> (6 - width);
1572                 break;
1573         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1574                 info[idx] = (struct field_modify_info){1, 0,
1575                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1576                 if (mask)
1577                         mask[idx] = 0xff >> (8 - width);
1578                 break;
1579         case RTE_FLOW_FIELD_IPV6_SRC:
1580                 if (mask) {
1581                         if (data->offset < 32) {
1582                                 info[idx] = (struct field_modify_info){4, 12,
1583                                                 MLX5_MODI_OUT_SIPV6_31_0};
1584                                 if (width < 32) {
1585                                         mask[3] =
1586                                                 rte_cpu_to_be_32(0xffffffff >>
1587                                                                  (32 - width));
1588                                         width = 0;
1589                                 } else {
1590                                         mask[3] = RTE_BE32(0xffffffff);
1591                                         width -= 32;
1592                                 }
1593                                 if (!width)
1594                                         break;
1595                                 ++idx;
1596                         }
1597                         if (data->offset < 64) {
1598                                 info[idx] = (struct field_modify_info){4, 8,
1599                                                 MLX5_MODI_OUT_SIPV6_63_32};
1600                                 if (width < 32) {
1601                                         mask[2] =
1602                                                 rte_cpu_to_be_32(0xffffffff >>
1603                                                                  (32 - width));
1604                                         width = 0;
1605                                 } else {
1606                                         mask[2] = RTE_BE32(0xffffffff);
1607                                         width -= 32;
1608                                 }
1609                                 if (!width)
1610                                         break;
1611                                 ++idx;
1612                         }
1613                         if (data->offset < 96) {
1614                                 info[idx] = (struct field_modify_info){4, 4,
1615                                                 MLX5_MODI_OUT_SIPV6_95_64};
1616                                 if (width < 32) {
1617                                         mask[1] =
1618                                                 rte_cpu_to_be_32(0xffffffff >>
1619                                                                  (32 - width));
1620                                         width = 0;
1621                                 } else {
1622                                         mask[1] = RTE_BE32(0xffffffff);
1623                                         width -= 32;
1624                                 }
1625                                 if (!width)
1626                                         break;
1627                                 ++idx;
1628                         }
1629                         info[idx] = (struct field_modify_info){4, 0,
1630                                                 MLX5_MODI_OUT_SIPV6_127_96};
1631                         mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
1632                 } else {
1633                         if (data->offset < 32)
1634                                 info[idx++] = (struct field_modify_info){4, 0,
1635                                                 MLX5_MODI_OUT_SIPV6_31_0};
1636                         if (data->offset < 64)
1637                                 info[idx++] = (struct field_modify_info){4, 0,
1638                                                 MLX5_MODI_OUT_SIPV6_63_32};
1639                         if (data->offset < 96)
1640                                 info[idx++] = (struct field_modify_info){4, 0,
1641                                                 MLX5_MODI_OUT_SIPV6_95_64};
1642                         if (data->offset < 128)
1643                                 info[idx++] = (struct field_modify_info){4, 0,
1644                                                 MLX5_MODI_OUT_SIPV6_127_96};
1645                 }
1646                 break;
1647         case RTE_FLOW_FIELD_IPV6_DST:
1648                 if (mask) {
1649                         if (data->offset < 32) {
1650                                 info[idx] = (struct field_modify_info){4, 12,
1651                                                 MLX5_MODI_OUT_DIPV6_31_0};
1652                                 if (width < 32) {
1653                                         mask[3] =
1654                                                 rte_cpu_to_be_32(0xffffffff >>
1655                                                                  (32 - width));
1656                                         width = 0;
1657                                 } else {
1658                                         mask[3] = RTE_BE32(0xffffffff);
1659                                         width -= 32;
1660                                 }
1661                                 if (!width)
1662                                         break;
1663                                 ++idx;
1664                         }
1665                         if (data->offset < 64) {
1666                                 info[idx] = (struct field_modify_info){4, 8,
1667                                                 MLX5_MODI_OUT_DIPV6_63_32};
1668                                 if (width < 32) {
1669                                         mask[2] =
1670                                                 rte_cpu_to_be_32(0xffffffff >>
1671                                                                  (32 - width));
1672                                         width = 0;
1673                                 } else {
1674                                         mask[2] = RTE_BE32(0xffffffff);
1675                                         width -= 32;
1676                                 }
1677                                 if (!width)
1678                                         break;
1679                                 ++idx;
1680                         }
1681                         if (data->offset < 96) {
1682                                 info[idx] = (struct field_modify_info){4, 4,
1683                                                 MLX5_MODI_OUT_DIPV6_95_64};
1684                                 if (width < 32) {
1685                                         mask[1] =
1686                                                 rte_cpu_to_be_32(0xffffffff >>
1687                                                                  (32 - width));
1688                                         width = 0;
1689                                 } else {
1690                                         mask[1] = RTE_BE32(0xffffffff);
1691                                         width -= 32;
1692                                 }
1693                                 if (!width)
1694                                         break;
1695                                 ++idx;
1696                         }
1697                         info[idx] = (struct field_modify_info){4, 0,
1698                                                 MLX5_MODI_OUT_DIPV6_127_96};
1699                         mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
1700                 } else {
1701                         if (data->offset < 32)
1702                                 info[idx++] = (struct field_modify_info){4, 0,
1703                                                 MLX5_MODI_OUT_DIPV6_31_0};
1704                         if (data->offset < 64)
1705                                 info[idx++] = (struct field_modify_info){4, 0,
1706                                                 MLX5_MODI_OUT_DIPV6_63_32};
1707                         if (data->offset < 96)
1708                                 info[idx++] = (struct field_modify_info){4, 0,
1709                                                 MLX5_MODI_OUT_DIPV6_95_64};
1710                         if (data->offset < 128)
1711                                 info[idx++] = (struct field_modify_info){4, 0,
1712                                                 MLX5_MODI_OUT_DIPV6_127_96};
1713                 }
1714                 break;
1715         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1716                 info[idx] = (struct field_modify_info){2, 0,
1717                                         MLX5_MODI_OUT_TCP_SPORT};
1718                 if (mask)
1719                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1720                 break;
1721         case RTE_FLOW_FIELD_TCP_PORT_DST:
1722                 info[idx] = (struct field_modify_info){2, 0,
1723                                         MLX5_MODI_OUT_TCP_DPORT};
1724                 if (mask)
1725                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1726                 break;
1727         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1728                 info[idx] = (struct field_modify_info){4, 0,
1729                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1730                 if (mask)
1731                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1732                                                      (32 - width));
1733                 break;
1734         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1735                 info[idx] = (struct field_modify_info){4, 0,
1736                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1737                 if (mask)
1738                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1739                                                      (32 - width));
1740                 break;
1741         case RTE_FLOW_FIELD_TCP_FLAGS:
1742                 info[idx] = (struct field_modify_info){2, 0,
1743                                         MLX5_MODI_OUT_TCP_FLAGS};
1744                 if (mask)
1745                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1746                 break;
1747         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1748                 info[idx] = (struct field_modify_info){2, 0,
1749                                         MLX5_MODI_OUT_UDP_SPORT};
1750                 if (mask)
1751                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1752                 break;
1753         case RTE_FLOW_FIELD_UDP_PORT_DST:
1754                 info[idx] = (struct field_modify_info){2, 0,
1755                                         MLX5_MODI_OUT_UDP_DPORT};
1756                 if (mask)
1757                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1758                 break;
1759         case RTE_FLOW_FIELD_VXLAN_VNI:
1760                 /* not supported yet */
1761                 break;
1762         case RTE_FLOW_FIELD_GENEVE_VNI:
1763                 /* not supported yet */
1764                 break;
1765         case RTE_FLOW_FIELD_GTP_TEID:
1766                 info[idx] = (struct field_modify_info){4, 0,
1767                                         MLX5_MODI_GTP_TEID};
1768                 if (mask)
1769                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1770                                                      (32 - width));
1771                 break;
1772         case RTE_FLOW_FIELD_TAG:
1773                 {
1774                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1775                                                    data->level, error);
1776                         if (reg < 0)
1777                                 return;
1778                         MLX5_ASSERT(reg != REG_NON);
1779                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1780                         info[idx] = (struct field_modify_info){4, 0,
1781                                                 reg_to_field[reg]};
1782                         if (mask)
1783                                 mask[idx] =
1784                                         rte_cpu_to_be_32(0xffffffff >>
1785                                                          (32 - width));
1786                 }
1787                 break;
1788         case RTE_FLOW_FIELD_MARK:
1789                 {
1790                         uint32_t mark_mask = priv->sh->dv_mark_mask;
1791                         uint32_t mark_count = __builtin_popcount(mark_mask);
1792                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1793                                                        0, error);
1794                         if (reg < 0)
1795                                 return;
1796                         MLX5_ASSERT(reg != REG_NON);
1797                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1798                         info[idx] = (struct field_modify_info){4, 0,
1799                                                 reg_to_field[reg]};
1800                         if (mask)
1801                                 mask[idx] = rte_cpu_to_be_32((mark_mask >>
1802                                          (mark_count - width)) & mark_mask);
1803                 }
1804                 break;
1805         case RTE_FLOW_FIELD_META:
1806                 {
1807                         uint32_t meta_mask = priv->sh->dv_meta_mask;
1808                         uint32_t meta_count = __builtin_popcount(meta_mask);
1809                         uint32_t msk_c0 =
1810                                 rte_cpu_to_be_32(priv->sh->dv_regc0_mask);
1811                         uint32_t shl_c0 = rte_bsf32(msk_c0);
1812                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1813                         if (reg < 0)
1814                                 return;
1815                         MLX5_ASSERT(reg != REG_NON);
1816                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1817                         if (reg == REG_C_0)
1818                                 *shift = shl_c0;
1819                         info[idx] = (struct field_modify_info){4, 0,
1820                                                 reg_to_field[reg]};
1821                         if (mask)
1822                                 mask[idx] = rte_cpu_to_be_32((meta_mask >>
1823                                         (meta_count - width)) & meta_mask);
1824                 }
1825                 break;
1826         case RTE_FLOW_FIELD_POINTER:
1827         case RTE_FLOW_FIELD_VALUE:
1828         default:
1829                 MLX5_ASSERT(false);
1830                 break;
1831         }
1832 }
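
/*
 * Worked example for the MAC_DST branch above: a full 48-bit write
 * (offset 0, width 48) first emits the 16-bit MLX5_MODI_OUT_DMAC_15_0
 * sub-field with mask[1] = 0xffff, leaving width 32, and then the
 * 32-bit MLX5_MODI_OUT_DMAC_47_16 sub-field with mask[0] = 0xffffffff.
 * A partial write such as offset 20, width 8 skips the low sub-field
 * and lands in DMAC_47_16 with off = 4 and
 * mask[0] = (0xffffffff >> 24) << 4 = 0xff0.
 */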
1833
1834 /**
1835  * Convert modify_field action to DV specification.
1836  *
1837  * @param[in] dev
1838  *   Pointer to the rte_eth_dev structure.
1839  * @param[in,out] resource
1840  *   Pointer to the modify-header resource.
1841  * @param[in] action
1842  *   Pointer to action specification.
1843  * @param[in] attr
1844  *   Attributes of the flow that includes this action.
1845  * @param[out] error
1846  *   Pointer to the error structure.
1847  *
1848  * @return
1849  *   0 on success, a negative errno value otherwise and rte_errno is set.
1850  */
1851 static int
1852 flow_dv_convert_action_modify_field
1853                         (struct rte_eth_dev *dev,
1854                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1855                          const struct rte_flow_action *action,
1856                          const struct rte_flow_attr *attr,
1857                          struct rte_flow_error *error)
1858 {
1859         const struct rte_flow_action_modify_field *conf =
1860                 (const struct rte_flow_action_modify_field *)(action->conf);
1861         struct rte_flow_item item = {
1862                 .spec = NULL,
1863                 .mask = NULL
1864         };
1865         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1866                                                                 {0, 0, 0} };
1867         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1868                                                                 {0, 0, 0} };
1869         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1870         uint32_t type;
1871         uint32_t shift = 0;
1872
1873         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1874             conf->src.field == RTE_FLOW_FIELD_VALUE) {
1875                 type = MLX5_MODIFICATION_TYPE_SET;
1876                 /* For SET, fill the destination field (field) first. */
1877                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1878                                                   conf->width, &shift, dev,
1879                                                   attr, error);
1880                 item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
1881                                         (void *)(uintptr_t)conf->src.pvalue :
1882                                         (void *)(uintptr_t)&conf->src.value;
1883         } else {
1884                 type = MLX5_MODIFICATION_TYPE_COPY;
1885                 /* For COPY, fill the destination field (dcopy) without mask. */
1886                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1887                                                   conf->width, &shift, dev,
1888                                                   attr, error);
1889                 /* Then construct the source field (field) with mask. */
1890                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1891                                                   conf->width, &shift,
1892                                                   dev, attr, error);
1893         }
1894         item.mask = &mask;
1895         return flow_dv_convert_modify_action(&item,
1896                         field, dcopy, resource, type, error);
1897 }
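
/*
 * Illustrative rte_flow usage exercising this conversion (a sketch, not
 * part of the driver): setting the low 16 bits of packet metadata from
 * an immediate value.
 *
 *	struct rte_flow_action_modify_field conf = {
 *		.operation = RTE_FLOW_MODIFY_SET,
 *		.dst = { .field = RTE_FLOW_FIELD_META },
 *		.src = { .field = RTE_FLOW_FIELD_VALUE },
 *		.width = 16,
 *	};
 *
 * The immediate itself goes into conf.src.value. A SET from VALUE or
 * POINTER takes the MLX5_MODIFICATION_TYPE_SET path above, while
 * field-to-field transfers take the MLX5_MODIFICATION_TYPE_COPY path.
 */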
1898
1899 /**
1900  * Validate MARK item.
1901  *
1902  * @param[in] dev
1903  *   Pointer to the rte_eth_dev structure.
1904  * @param[in] item
1905  *   Item specification.
1906  * @param[in] attr
1907  *   Attributes of flow that includes this item.
1908  * @param[out] error
1909  *   Pointer to error structure.
1910  *
1911  * @return
1912  *   0 on success, a negative errno value otherwise and rte_errno is set.
1913  */
1914 static int
1915 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1916                            const struct rte_flow_item *item,
1917                            const struct rte_flow_attr *attr __rte_unused,
1918                            struct rte_flow_error *error)
1919 {
1920         struct mlx5_priv *priv = dev->data->dev_private;
1921         struct mlx5_dev_config *config = &priv->config;
1922         const struct rte_flow_item_mark *spec = item->spec;
1923         const struct rte_flow_item_mark *mask = item->mask;
1924         const struct rte_flow_item_mark nic_mask = {
1925                 .id = priv->sh->dv_mark_mask,
1926         };
1927         int ret;
1928
1929         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1930                 return rte_flow_error_set(error, ENOTSUP,
1931                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1932                                           "extended metadata feature"
1933                                           " isn't enabled");
1934         if (!mlx5_flow_ext_mreg_supported(dev))
1935                 return rte_flow_error_set(error, ENOTSUP,
1936                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1937                                           "extended metadata register"
1938                                           " isn't supported");
1939         if (!nic_mask.id)
1940                 return rte_flow_error_set(error, ENOTSUP,
1941                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1942                                           "extended metadata register"
1943                                           " isn't available");
1944         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1945         if (ret < 0)
1946                 return ret;
1947         if (!spec)
1948                 return rte_flow_error_set(error, EINVAL,
1949                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1950                                           item->spec,
1951                                           "data cannot be empty");
1952         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1953                 return rte_flow_error_set(error, EINVAL,
1954                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1955                                           &spec->id,
1956                                           "mark id exceeds the limit");
1957         if (!mask)
1958                 mask = &nic_mask;
1959         if (!mask->id)
1960                 return rte_flow_error_set(error, EINVAL,
1961                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1962                                         "mask cannot be zero");
1963
1964         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1965                                         (const uint8_t *)&nic_mask,
1966                                         sizeof(struct rte_flow_item_mark),
1967                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1968         if (ret < 0)
1969                 return ret;
1970         return 0;
1971 }
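
/*
 * Example of a pattern item accepted here (illustration only): matching
 * MARK id 42 previously set by another offloaded flow.
 *
 *	struct rte_flow_item_mark spec = { .id = 42 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_MARK,
 *		.spec = &spec,
 *	};
 *
 * With a NULL mask the device capability mask (dv_mark_mask) is used.
 */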
1972
1973 /**
1974  * Validate META item.
1975  *
1976  * @param[in] dev
1977  *   Pointer to the rte_eth_dev structure.
1978  * @param[in] item
1979  *   Item specification.
1980  * @param[in] attr
1981  *   Attributes of flow that includes this item.
1982  * @param[out] error
1983  *   Pointer to error structure.
1984  *
1985  * @return
1986  *   0 on success, a negative errno value otherwise and rte_errno is set.
1987  */
1988 static int
1989 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1990                            const struct rte_flow_item *item,
1991                            const struct rte_flow_attr *attr,
1992                            struct rte_flow_error *error)
1993 {
1994         struct mlx5_priv *priv = dev->data->dev_private;
1995         struct mlx5_dev_config *config = &priv->config;
1996         const struct rte_flow_item_meta *spec = item->spec;
1997         const struct rte_flow_item_meta *mask = item->mask;
1998         struct rte_flow_item_meta nic_mask = {
1999                 .data = UINT32_MAX
2000         };
2001         int reg;
2002         int ret;
2003
2004         if (!spec)
2005                 return rte_flow_error_set(error, EINVAL,
2006                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2007                                           item->spec,
2008                                           "data cannot be empty");
2009         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2010                 if (!mlx5_flow_ext_mreg_supported(dev))
2011                         return rte_flow_error_set(error, ENOTSUP,
2012                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2013                                           "extended metadata register"
2014                                           " isn't supported");
2015                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2016                 if (reg < 0)
2017                         return reg;
2018                 if (reg == REG_NON)
2019                         return rte_flow_error_set(error, ENOTSUP,
2020                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2021                                         "unavailable extended metadata register");
2022                 if (reg == REG_B)
2023                         return rte_flow_error_set(error, ENOTSUP,
2024                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2025                                           "match on reg_b "
2026                                           "isn't supported");
2027                 if (reg != REG_A)
2028                         nic_mask.data = priv->sh->dv_meta_mask;
2029         } else {
2030                 if (attr->transfer)
2031                         return rte_flow_error_set(error, ENOTSUP,
2032                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2033                                         "extended metadata feature "
2034                                         "should be enabled when "
2035                                         "meta item is requested "
2036                                         "with e-switch mode");
2037                 if (attr->ingress)
2038                         return rte_flow_error_set(error, ENOTSUP,
2039                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2040                                         "match on metadata for ingress "
2041                                         "is not supported in legacy "
2042                                         "metadata mode");
2043         }
2044         if (!mask)
2045                 mask = &rte_flow_item_meta_mask;
2046         if (!mask->data)
2047                 return rte_flow_error_set(error, EINVAL,
2048                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2049                                         "mask cannot be zero");
2050
2051         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2052                                         (const uint8_t *)&nic_mask,
2053                                         sizeof(struct rte_flow_item_meta),
2054                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2055         return ret;
2056 }
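
/*
 * In extended metadata mode the usable META mask depends on the register
 * selected above: REG_A keeps the full 32 bits, while a REG_C_x register
 * is narrowed to dv_meta_mask, e.g. 16 bits when part of REG_C_0 is
 * reserved for the E-Switch source vport.
 */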
2057
2058 /**
2059  * Validate TAG item.
2060  *
2061  * @param[in] dev
2062  *   Pointer to the rte_eth_dev structure.
2063  * @param[in] item
2064  *   Item specification.
2065  * @param[in] attr
2066  *   Attributes of flow that includes this item.
2067  * @param[out] error
2068  *   Pointer to error structure.
2069  *
2070  * @return
2071  *   0 on success, a negative errno value otherwise and rte_errno is set.
2072  */
2073 static int
2074 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2075                           const struct rte_flow_item *item,
2076                           const struct rte_flow_attr *attr __rte_unused,
2077                           struct rte_flow_error *error)
2078 {
2079         const struct rte_flow_item_tag *spec = item->spec;
2080         const struct rte_flow_item_tag *mask = item->mask;
2081         const struct rte_flow_item_tag nic_mask = {
2082                 .data = RTE_BE32(UINT32_MAX),
2083                 .index = 0xff,
2084         };
2085         int ret;
2086
2087         if (!mlx5_flow_ext_mreg_supported(dev))
2088                 return rte_flow_error_set(error, ENOTSUP,
2089                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2090                                           "extended metadata register"
2091                                           " isn't supported");
2092         if (!spec)
2093                 return rte_flow_error_set(error, EINVAL,
2094                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2095                                           item->spec,
2096                                           "data cannot be empty");
2097         if (!mask)
2098                 mask = &rte_flow_item_tag_mask;
2099         if (!mask->data)
2100                 return rte_flow_error_set(error, EINVAL,
2101                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2102                                         "mask cannot be zero");
2103
2104         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2105                                         (const uint8_t *)&nic_mask,
2106                                         sizeof(struct rte_flow_item_tag),
2107                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2108         if (ret < 0)
2109                 return ret;
2110         if (mask->index != 0xff)
2111                 return rte_flow_error_set(error, EINVAL,
2112                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2113                                           "partial mask for tag index"
2114                                           " is not supported");
2115         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2116         if (ret < 0)
2117                 return ret;
2118         MLX5_ASSERT(ret != REG_NON);
2119         return 0;
2120 }
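
/*
 * Example TAG item (illustration only): match the full 32-bit value of
 * tag register 3. The index must always be fully masked (0xff); only
 * the data mask may be narrowed.
 *
 *	struct rte_flow_item_tag spec = {
 *		.data = 0xdeadbeef,
 *		.index = 3,
 *	};
 */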
2121
2122 /**
2123  * Validate port_id item.
2124  *
2125  * @param[in] dev
2126  *   Pointer to the rte_eth_dev structure.
2127  * @param[in] item
2128  *   Item specification.
2129  * @param[in] attr
2130  *   Attributes of flow that includes this item.
2131  * @param[in] item_flags
2132  *   Bit-fields that holds the items detected until now.
2133  * @param[out] error
2134  *   Pointer to error structure.
2135  *
2136  * @return
2137  *   0 on success, a negative errno value otherwise and rte_errno is set.
2138  */
2139 static int
2140 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2141                               const struct rte_flow_item *item,
2142                               const struct rte_flow_attr *attr,
2143                               uint64_t item_flags,
2144                               struct rte_flow_error *error)
2145 {
2146         const struct rte_flow_item_port_id *spec = item->spec;
2147         const struct rte_flow_item_port_id *mask = item->mask;
2148         const struct rte_flow_item_port_id switch_mask = {
2149                         .id = 0xffffffff,
2150         };
2151         struct mlx5_priv *esw_priv;
2152         struct mlx5_priv *dev_priv;
2153         int ret;
2154
2155         if (!attr->transfer)
2156                 return rte_flow_error_set(error, EINVAL,
2157                                           RTE_FLOW_ERROR_TYPE_ITEM,
2158                                           NULL,
2159                                           "match on port id is valid only"
2160                                           " when transfer flag is enabled");
2161         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2162                 return rte_flow_error_set(error, ENOTSUP,
2163                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2164                                           "multiple source ports are not"
2165                                           " supported");
2166         if (!mask)
2167                 mask = &switch_mask;
2168         if (mask->id != 0xffffffff)
2169                 return rte_flow_error_set(error, ENOTSUP,
2170                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2171                                            mask,
2172                                            "no support for partial mask on"
2173                                            " \"id\" field");
2174         ret = mlx5_flow_item_acceptable
2175                                 (item, (const uint8_t *)mask,
2176                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2177                                  sizeof(struct rte_flow_item_port_id),
2178                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2179         if (ret)
2180                 return ret;
2181         if (!spec)
2182                 return 0;
2183         if (spec->id == MLX5_PORT_ESW_MGR)
2184                 return 0;
2185         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2186         if (!esw_priv)
2187                 return rte_flow_error_set(error, rte_errno,
2188                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2189                                           "failed to obtain E-Switch info for"
2190                                           " port");
2191         dev_priv = mlx5_dev_to_eswitch_info(dev);
2192         if (!dev_priv)
2193                 return rte_flow_error_set(error, rte_errno,
2194                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2195                                           NULL,
2196                                           "failed to obtain E-Switch info");
2197         if (esw_priv->domain_id != dev_priv->domain_id)
2198                 return rte_flow_error_set(error, EINVAL,
2199                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2200                                           "cannot match on a port from a"
2201                                           " different E-Switch");
2202         return 0;
2203 }
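
/*
 * Transfer-only example (illustration): match packets arriving from the
 * E-Switch port represented by DPDK port 1.
 *
 *	struct rte_flow_item_port_id spec = { .id = 1 };
 *
 * The special id MLX5_PORT_ESW_MGR stands for the E-Switch manager
 * itself and bypasses the E-Switch info checks above.
 */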
2204
2205 /**
2206  * Validate VLAN item.
2207  *
2208  * @param[in] item
2209  *   Item specification.
2210  * @param[in] item_flags
2211  *   Bit-fields that holds the items detected until now.
2212  * @param[in] dev
2213  *   Ethernet device flow is being created on.
2214  * @param[out] error
2215  *   Pointer to error structure.
2216  *
2217  * @return
2218  *   0 on success, a negative errno value otherwise and rte_errno is set.
2219  */
2220 static int
2221 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2222                            uint64_t item_flags,
2223                            struct rte_eth_dev *dev,
2224                            struct rte_flow_error *error)
2225 {
2226         const struct rte_flow_item_vlan *mask = item->mask;
2227         const struct rte_flow_item_vlan nic_mask = {
2228                 .tci = RTE_BE16(UINT16_MAX),
2229                 .inner_type = RTE_BE16(UINT16_MAX),
2230                 .has_more_vlan = 1,
2231         };
2232         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2233         int ret;
2234         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2235                                         MLX5_FLOW_LAYER_INNER_L4) :
2236                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2237                                         MLX5_FLOW_LAYER_OUTER_L4);
2238         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2239                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2240
2241         if (item_flags & vlanm)
2242                 return rte_flow_error_set(error, EINVAL,
2243                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2244                                           "multiple VLAN layers not supported");
2245         else if ((item_flags & l34m) != 0)
2246                 return rte_flow_error_set(error, EINVAL,
2247                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2248                                           "VLAN cannot follow L3/L4 layer");
2249         if (!mask)
2250                 mask = &rte_flow_item_vlan_mask;
2251         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2252                                         (const uint8_t *)&nic_mask,
2253                                         sizeof(struct rte_flow_item_vlan),
2254                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2255         if (ret)
2256                 return ret;
2257         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2258                 struct mlx5_priv *priv = dev->data->dev_private;
2259
2260                 if (priv->vmwa_context) {
2261                         /*
2262                          * A non-NULL context means we run in a virtual
2263                          * machine with SR-IOV enabled, and a VLAN interface
2264                          * must be created for the hypervisor to set up the
2265                          * E-Switch vport context correctly. We avoid creating
2266                          * multiple VLAN interfaces, so VLAN tag masks are not supported.
2267                          */
2268                         return rte_flow_error_set(error, EINVAL,
2269                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2270                                                   item,
2271                                                   "VLAN tag mask is not"
2272                                                   " supported in virtual"
2273                                                   " environment");
2274                 }
2275         }
2276         return 0;
2277 }
2278
2279 /*
2280  * GTP flags are contained in 1 byte of the format:
2281  * -------------------------------------------
2282  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2283  * |-----------------------------------------|
2284  * | value | Version | PT | Res | E | S | PN |
2285  * -------------------------------------------
2286  *
2287  * Matching is supported only for GTP flags E, S, PN.
2288  */
2289 #define MLX5_GTP_FLAGS_MASK     0x07
2290
2291 /**
2292  * Validate GTP item.
2293  *
2294  * @param[in] dev
2295  *   Pointer to the rte_eth_dev structure.
2296  * @param[in] item
2297  *   Item specification.
2298  * @param[in] item_flags
2299  *   Bit-fields that holds the items detected until now.
2300  * @param[out] error
2301  *   Pointer to error structure.
2302  *
2303  * @return
2304  *   0 on success, a negative errno value otherwise and rte_errno is set.
2305  */
2306 static int
2307 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2308                           const struct rte_flow_item *item,
2309                           uint64_t item_flags,
2310                           struct rte_flow_error *error)
2311 {
2312         struct mlx5_priv *priv = dev->data->dev_private;
2313         const struct rte_flow_item_gtp *spec = item->spec;
2314         const struct rte_flow_item_gtp *mask = item->mask;
2315         const struct rte_flow_item_gtp nic_mask = {
2316                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2317                 .msg_type = 0xff,
2318                 .teid = RTE_BE32(0xffffffff),
2319         };
2320
2321         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2322                 return rte_flow_error_set(error, ENOTSUP,
2323                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2324                                           "GTP support is not enabled");
2325         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2326                 return rte_flow_error_set(error, ENOTSUP,
2327                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2328                                           "multiple tunnel layers not"
2329                                           " supported");
2330         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2331                 return rte_flow_error_set(error, EINVAL,
2332                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2333                                           "no outer UDP layer found");
2334         if (!mask)
2335                 mask = &rte_flow_item_gtp_mask;
2336         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2337                 return rte_flow_error_set(error, ENOTSUP,
2338                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2339                                           "Match is supported for GTP"
2340                                           " flags only");
2341         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2342                                          (const uint8_t *)&nic_mask,
2343                                          sizeof(struct rte_flow_item_gtp),
2344                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2345 }
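
/*
 * Example GTP item (illustration only): match GTP packets carrying
 * TEID 0x1234; with the default mask only the TEID is matched.
 *
 *	struct rte_flow_item_gtp spec = {
 *		.teid = RTE_BE32(0x1234),
 *	};
 */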
2346
2347 /**
2348  * Validate GTP PSC item.
2349  *
2350  * @param[in] item
2351  *   Item specification.
2352  * @param[in] last_item
2353  *   Previous validated item in the pattern items.
2354  * @param[in] gtp_item
2355  *   Previous GTP item specification.
2356  * @param[in] attr
2357  *   Pointer to flow attributes.
2358  * @param[out] error
2359  *   Pointer to error structure.
2360  *
2361  * @return
2362  *   0 on success, a negative errno value otherwise and rte_errno is set.
2363  */
2364 static int
2365 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2366                               uint64_t last_item,
2367                               const struct rte_flow_item *gtp_item,
2368                               const struct rte_flow_attr *attr,
2369                               struct rte_flow_error *error)
2370 {
2371         const struct rte_flow_item_gtp *gtp_spec;
2372         const struct rte_flow_item_gtp *gtp_mask;
2373         const struct rte_flow_item_gtp_psc *mask;
2374         const struct rte_flow_item_gtp_psc nic_mask = {
2375                 .hdr.type = 0xF,
2376                 .hdr.qfi = 0x3F,
2377         };
2378
2379         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2380                 return rte_flow_error_set
2381                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2382                          "GTP PSC item must be preceded with GTP item");
2383         gtp_spec = gtp_item->spec;
2384         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2385         /* GTP spec is present and the E flag is requested to match zero. */
2386         if (gtp_spec &&
2387                 (gtp_mask->v_pt_rsv_flags &
2388                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2389                 return rte_flow_error_set
2390                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2391                          "GTP E flag must be 1 to match GTP PSC");
2392         /* Check the flow is not created in group zero. */
2393         if (!attr->transfer && !attr->group)
2394                 return rte_flow_error_set
2395                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2396                          "GTP PSC is not supported for group 0");
2397         /* GTP PSC spec is optional, nothing more to validate without it. */
2398         if (!item->spec)
2399                 return 0;
2400         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2401         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2402                                          (const uint8_t *)&nic_mask,
2403                                          sizeof(struct rte_flow_item_gtp_psc),
2404                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2405 }
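
/*
 * Matching on GTP PSC therefore requires a preceding GTP item whose E
 * flag is either unmasked or set to 1, e.g. a pattern of
 * ETH / IPV4 / UDP / GTP (E = 1) / GTP_PSC in a transfer flow or a
 * non-zero group.
 */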
2406
2407 /**
2408  * Validate IPV4 item.
2409  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2410  * Use the existing validation function mlx5_flow_validate_item_ipv4() and
2411  * add specific validation of the fragment_offset field.
2412  * @param[in] item
2413  *   Item specification.
2414  * @param[in] item_flags
2415  *   Bit-fields that holds the items detected until now.
2416  * @param[out] error
2417  *   Pointer to error structure.
2418  *
2419  * @return
2420  *   0 on success, a negative errno value otherwise and rte_errno is set.
2421  */
2422 static int
2423 flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
2424                            const struct rte_flow_item *item,
2425                            uint64_t item_flags, uint64_t last_item,
2426                            uint16_t ether_type, struct rte_flow_error *error)
2427 {
2428         int ret;
2429         struct mlx5_priv *priv = dev->data->dev_private;
2430         const struct rte_flow_item_ipv4 *spec = item->spec;
2431         const struct rte_flow_item_ipv4 *last = item->last;
2432         const struct rte_flow_item_ipv4 *mask = item->mask;
2433         rte_be16_t fragment_offset_spec = 0;
2434         rte_be16_t fragment_offset_last = 0;
2435         struct rte_flow_item_ipv4 nic_ipv4_mask = {
2436                 .hdr = {
2437                         .src_addr = RTE_BE32(0xffffffff),
2438                         .dst_addr = RTE_BE32(0xffffffff),
2439                         .type_of_service = 0xff,
2440                         .fragment_offset = RTE_BE16(0xffff),
2441                         .next_proto_id = 0xff,
2442                         .time_to_live = 0xff,
2443                 },
2444         };
2445
2446         if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
2447                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2448                 bool ihl_cap = !tunnel ? priv->config.hca_attr.outer_ipv4_ihl :
2449                                priv->config.hca_attr.inner_ipv4_ihl;
2450                 if (!ihl_cap)
2451                         return rte_flow_error_set(error, ENOTSUP,
2452                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2453                                                   item,
2454                                                   "IPV4 ihl offload not supported");
2455                 nic_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
2456         }
2457         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2458                                            ether_type, &nic_ipv4_mask,
2459                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2460         if (ret < 0)
2461                 return ret;
2462         if (spec && mask)
2463                 fragment_offset_spec = spec->hdr.fragment_offset &
2464                                        mask->hdr.fragment_offset;
2465         if (!fragment_offset_spec)
2466                 return 0;
2467         /*
2468          * spec and mask are valid, enforce using full mask to make sure the
2469          * complete value is used correctly.
2470          */
2471         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2472                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2473                 return rte_flow_error_set(error, EINVAL,
2474                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2475                                           item, "must use full mask for"
2476                                           " fragment_offset");
2477         /*
2478          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2479          * indicating this is the first fragment of a fragmented packet.
2480          * This is not yet supported in MLX5; return an appropriate error message.
2481          */
2482         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2483                 return rte_flow_error_set(error, ENOTSUP,
2484                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2485                                           "match on first fragment not "
2486                                           "supported");
2487         if (fragment_offset_spec && !last)
2488                 return rte_flow_error_set(error, ENOTSUP,
2489                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2490                                           "specified value not supported");
2491         /* spec and last are valid, validate the specified range. */
2492         fragment_offset_last = last->hdr.fragment_offset &
2493                                mask->hdr.fragment_offset;
2494         /*
2495          * Match on fragment_offset spec 0x2001 and last 0x3fff
2496          * means MF is 1 and frag-offset is > 0.
2497          * This matches the 2nd and subsequent fragments, excluding the last.
2498          * This is not yet supported in MLX5, return appropriate
2499          * error message.
2500          */
2501         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2502             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2503                 return rte_flow_error_set(error, ENOTSUP,
2504                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2505                                           last, "match on following "
2506                                           "fragments not supported");
2507         /*
2508          * Match on fragment_offset spec 0x0001 and last 0x1fff
2509          * means MF is 0 and frag-offset is > 0.
2510          * This matches the last fragment of a fragmented packet.
2511          * This is not yet supported in MLX5, return appropriate
2512          * error message.
2513          */
2514         if (fragment_offset_spec == RTE_BE16(1) &&
2515             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2516                 return rte_flow_error_set(error, ENOTSUP,
2517                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2518                                           last, "match on last "
2519                                           "fragment not supported");
2520         /*
2521          * Match on fragment_offset spec 0x0001 and last 0x3fff
2522          * means MF and/or frag-offset is not 0.
2523          * This is a fragmented packet.
2524          * Other range values are invalid and rejected.
2525          */
2526         if (!(fragment_offset_spec == RTE_BE16(1) &&
2527               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2528                 return rte_flow_error_set(error, ENOTSUP,
2529                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2530                                           "specified range not supported");
2531         return 0;
2532 }
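
/*
 * Usage sketch (illustrative only, not part of the driver): given the
 * checks above, the only accepted non-zero fragment_offset range is
 * spec 0x0001 with last 0x3fff under a full 0x3fff mask, which matches
 * any fragmented IPv4 packet. An application could build it as:
 *
 *   const struct rte_flow_item_ipv4 frag_spec = {
 *           .hdr.fragment_offset = RTE_BE16(1),
 *   };
 *   const struct rte_flow_item_ipv4 frag_last = {
 *           .hdr.fragment_offset = RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK |
 *                                           RTE_IPV4_HDR_MF_FLAG),
 *   };
 *   const struct rte_flow_item_ipv4 frag_mask = {
 *           .hdr.fragment_offset = RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK |
 *                                           RTE_IPV4_HDR_MF_FLAG),
 *   };
 *   const struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *           .spec = &frag_spec,
 *           .last = &frag_last,
 *           .mask = &frag_mask,
 *   };
 */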
2533
2534 /**
2535  * Validate IPV6 fragment extension item.
2536  *
2537  * @param[in] item
2538  *   Item specification.
2539  * @param[in] item_flags
2540  *   Bit-fields that hold the items detected until now.
2541  * @param[out] error
2542  *   Pointer to error structure.
2543  *
2544  * @return
2545  *   0 on success, a negative errno value otherwise and rte_errno is set.
2546  */
2547 static int
2548 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2549                                     uint64_t item_flags,
2550                                     struct rte_flow_error *error)
2551 {
2552         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2553         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2554         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2555         rte_be16_t frag_data_spec = 0;
2556         rte_be16_t frag_data_last = 0;
2557         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2558         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2559                                       MLX5_FLOW_LAYER_OUTER_L4;
2560         int ret = 0;
2561         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2562                 .hdr = {
2563                         .next_header = 0xff,
2564                         .frag_data = RTE_BE16(0xffff),
2565                 },
2566         };
2567
2568         if (item_flags & l4m)
2569                 return rte_flow_error_set(error, EINVAL,
2570                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2571                                           "ipv6 fragment extension item cannot "
2572                                           "follow L4 item.");
2573         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2574             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2575                 return rte_flow_error_set(error, EINVAL,
2576                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2577                                           "ipv6 fragment extension item must "
2578                                           "follow ipv6 item");
2579         if (spec && mask)
2580                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2581         if (!frag_data_spec)
2582                 return 0;
2583         /*
2584          * spec and mask are valid, enforce using full mask to make sure the
2585          * complete value is used correctly.
2586          */
2587         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2588                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2589                 return rte_flow_error_set(error, EINVAL,
2590                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2591                                           item, "must use full mask for"
2592                                           " frag_data");
2593         /*
2594          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2595          * This is the 1st fragment of a fragmented packet.
2596          */
2597         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2598                 return rte_flow_error_set(error, ENOTSUP,
2599                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2600                                           "match on first fragment not "
2601                                           "supported");
2602         if (frag_data_spec && !last)
2603                 return rte_flow_error_set(error, EINVAL,
2604                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2605                                           "specified value not supported");
2606         ret = mlx5_flow_item_acceptable
2607                                 (item, (const uint8_t *)mask,
2608                                  (const uint8_t *)&nic_mask,
2609                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2610                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2611         if (ret)
2612                 return ret;
2613         /* spec and last are valid, validate the specified range. */
2614         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2615         /*
2616          * Match on frag_data spec 0x0009 and last 0xfff9
2617          * means M is 1 and frag-offset is > 0.
2618          * This matches the 2nd and subsequent fragments, excluding the last.
2619          * This is not yet supported in MLX5, return appropriate
2620          * error message.
2621          */
2622         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2623                                        RTE_IPV6_EHDR_MF_MASK) &&
2624             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2625                 return rte_flow_error_set(error, ENOTSUP,
2626                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2627                                           last, "match on following "
2628                                           "fragments not supported");
2629         /*
2630          * Match on frag_data spec 0x0008 and last 0xfff8
2631          * means M is 0 and frag-offset is > 0.
2632          * This matches the last fragment of a fragmented packet.
2633          * This is not yet supported in MLX5, return appropriate
2634          * error message.
2635          */
2636         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2637             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2638                 return rte_flow_error_set(error, ENOTSUP,
2639                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2640                                           last, "match on last "
2641                                           "fragment not supported");
2642         /* Other range values are invalid and rejected. */
2643         return rte_flow_error_set(error, EINVAL,
2644                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2645                                   "specified range not supported");
2646 }
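
/*
 * Usage sketch (illustrative only, not part of the driver): every
 * non-zero frag_data range falls into one of the rejected cases above,
 * so in practice the item validates only as a presence match on the
 * extension header, placed right after the IPv6 item:
 *
 *   const struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV6 },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */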
2647
2648 /**
2649  * Validate ASO CT item.
2650  *
2651  * @param[in] dev
2652  *   Pointer to the rte_eth_dev structure.
2653  * @param[in] item
2654  *   Item specification.
2655  * @param[in] item_flags
2656  *   Pointer to bit-fields that hold the items detected until now.
2657  * @param[out] error
2658  *   Pointer to error structure.
2659  *
2660  * @return
2661  *   0 on success, a negative errno value otherwise and rte_errno is set.
2662  */
2663 static int
2664 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2665                              const struct rte_flow_item *item,
2666                              uint64_t *item_flags,
2667                              struct rte_flow_error *error)
2668 {
2669         const struct rte_flow_item_conntrack *spec = item->spec;
2670         const struct rte_flow_item_conntrack *mask = item->mask;
2671         RTE_SET_USED(dev);
2672         uint32_t flags;
2673
2674         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2675                 return rte_flow_error_set(error, EINVAL,
2676                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2677                                           "Only one CT is supported");
2678         if (!mask)
2679                 mask = &rte_flow_item_conntrack_mask;
             /* The spec is dereferenced below; a rule without one is invalid. */
             if (!spec)
                     return rte_flow_error_set(error, EINVAL,
                                               RTE_FLOW_ERROR_TYPE_ITEM, item,
                                               "CT item spec cannot be NULL");
2680         flags = spec->flags & mask->flags;
2681         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2682             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2683              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2684              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2685                 return rte_flow_error_set(error, EINVAL,
2686                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2687                                           "Conflicting status bits");
2688         /* State change also needs to be considered. */
2689         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2690         return 0;
2691 }
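
/*
 * Usage sketch (illustrative only, not part of the driver): a CT item
 * that passes the conflict check above asserts the valid-state bit
 * without any of the invalid/bad/disabled bits:
 *
 *   const struct rte_flow_item_conntrack ct_spec = {
 *           .flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
 *   };
 *   const struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
 *           .spec = &ct_spec,
 *   };
 *
 * A NULL mask falls back to rte_flow_item_conntrack_mask.
 */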
2692
2693 /**
2694  * Validate the pop VLAN action.
2695  *
2696  * @param[in] dev
2697  *   Pointer to the rte_eth_dev structure.
2698  * @param[in] action_flags
2699  *   Holds the actions detected until now.
2700  * @param[in] action
2701  *   Pointer to the pop vlan action.
2702  * @param[in] item_flags
2703  *   The items found in this flow rule.
2704  * @param[in] attr
2705  *   Pointer to flow attributes.
2706  * @param[out] error
2707  *   Pointer to error structure.
2708  *
2709  * @return
2710  *   0 on success, a negative errno value otherwise and rte_errno is set.
2711  */
2712 static int
2713 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2714                                  uint64_t action_flags,
2715                                  const struct rte_flow_action *action,
2716                                  uint64_t item_flags,
2717                                  const struct rte_flow_attr *attr,
2718                                  struct rte_flow_error *error)
2719 {
2720         const struct mlx5_priv *priv = dev->data->dev_private;
2721         struct mlx5_dev_ctx_shared *sh = priv->sh;
2722         bool direction_error = false;
2723
2724         if (!priv->sh->pop_vlan_action)
2725                 return rte_flow_error_set(error, ENOTSUP,
2726                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2727                                           NULL,
2728                                           "pop vlan action is not supported");
2729         /* Pop VLAN is not supported in egress except for CX6 FDB mode. */
2730         if (attr->transfer) {
2731                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2732                 bool is_cx5 = sh->steering_format_version ==
2733                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2734
2735                 if (fdb_tx && is_cx5)
2736                         direction_error = true;
2737         } else if (attr->egress) {
2738                 direction_error = true;
2739         }
2740         if (direction_error)
2741                 return rte_flow_error_set(error, ENOTSUP,
2742                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2743                                           NULL,
2744                                           "pop vlan action not supported for egress");
2745         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2746                 return rte_flow_error_set(error, ENOTSUP,
2747                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2748                                           "no support for multiple VLAN "
2749                                           "actions");
2750         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2751         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2752             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2753                 return rte_flow_error_set(error, ENOTSUP,
2754                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2755                                           NULL,
2756                                           "cannot pop vlan after decap without "
2757                                           "match on inner vlan in the flow");
2758         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2759         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2760             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2761                 return rte_flow_error_set(error, ENOTSUP,
2762                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2763                                           NULL,
2764                                           "cannot pop vlan without a "
2765                                           "match on (outer) vlan in the flow");
2766         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2767                 return rte_flow_error_set(error, EINVAL,
2768                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2769                                           "wrong action order, port_id should "
2770                                           "be after pop VLAN action");
2771         if (!attr->transfer && priv->representor)
2772                 return rte_flow_error_set(error, ENOTSUP,
2773                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2774                                           "pop vlan action for VF representor "
2775                                           "not supported on NIC table");
2776         return 0;
2777 }
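
/*
 * Usage sketch (illustrative only, not part of the driver): a rule that
 * satisfies the constraints above pops the tag on ingress and matches an
 * outer VLAN explicitly; queue index 0 is an arbitrary example fate:
 *
 *   const struct rte_flow_attr attr = { .ingress = 1 };
 *   const struct rte_flow_action_queue queue = { .index = 0 };
 *   const struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_VLAN },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   const struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */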
2778
2779 /**
2780  * Get VLAN default info from vlan match info.
2781  *
2782  * @param[in] items
2783  *   The list of item specifications.
2784  * @param[out] vlan
2785  *   Pointer to the VLAN info to fill.
2789  */
2790 static void
2791 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2792                                   struct rte_vlan_hdr *vlan)
2793 {
2794         const struct rte_flow_item_vlan nic_mask = {
2795                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2796                                 MLX5DV_FLOW_VLAN_VID_MASK),
2797                 .inner_type = RTE_BE16(0xffff),
2798         };
2799
2800         if (items == NULL)
2801                 return;
2802         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2803                 int type = items->type;
2804
2805                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2806                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2807                         break;
2808         }
2809         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2810                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2811                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2812
2813                 /* If VLAN item in pattern doesn't contain data, return here. */
2814                 if (!vlan_v)
2815                         return;
2816                 if (!vlan_m)
2817                         vlan_m = &nic_mask;
2818                 /* Only full match values are accepted */
2819                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2820                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2821                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2822                         vlan->vlan_tci |=
2823                                 rte_be_to_cpu_16(vlan_v->tci &
2824                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2825                 }
2826                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2827                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2828                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2829                         vlan->vlan_tci |=
2830                                 rte_be_to_cpu_16(vlan_v->tci &
2831                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2832                 }
2833                 if (vlan_m->inner_type == nic_mask.inner_type)
2834                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2835                                                            vlan_m->inner_type);
2836         }
2837 }
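
/*
 * Usage sketch (illustrative only): the TCI layout assumed by the helper
 * above puts PCP in bits 15:13, DEI in bit 12 and VID in bits 11:0, so a
 * fully-masked TCI of 0x6064 decomposes as:
 *
 *   uint16_t tci = 0x6064;
 *   uint16_t pcp = (tci & MLX5DV_FLOW_VLAN_PCP_MASK) >>
 *                  MLX5DV_FLOW_VLAN_PCP_SHIFT;        => 3
 *   uint16_t vid = tci & MLX5DV_FLOW_VLAN_VID_MASK;   => 0x064
 */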
2838
2839 /**
2840  * Validate the push VLAN action.
2841  *
2842  * @param[in] dev
2843  *   Pointer to the rte_eth_dev structure.
2844  * @param[in] action_flags
2845  *   Holds the actions detected until now.
2846  * @param[in] vlan_m
2847  *   Pointer to the VLAN item mask from the pattern, or NULL if none.
2848  * @param[in] action
2849  *   Pointer to the action structure.
2850  * @param[in] attr
2851  *   Pointer to flow attributes
2852  * @param[out] error
2853  *   Pointer to error structure.
2854  *
2855  * @return
2856  *   0 on success, a negative errno value otherwise and rte_errno is set.
2857  */
2858 static int
2859 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2860                                   uint64_t action_flags,
2861                                   const struct rte_flow_item_vlan *vlan_m,
2862                                   const struct rte_flow_action *action,
2863                                   const struct rte_flow_attr *attr,
2864                                   struct rte_flow_error *error)
2865 {
2866         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2867         const struct mlx5_priv *priv = dev->data->dev_private;
2868         struct mlx5_dev_ctx_shared *sh = priv->sh;
2869         bool direction_error = false;
2870
2871         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2872             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2873                 return rte_flow_error_set(error, EINVAL,
2874                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2875                                           "invalid vlan ethertype");
2876         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2877                 return rte_flow_error_set(error, EINVAL,
2878                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2879                                           "wrong action order, port_id should "
2880                                           "be after push VLAN");
2881         /* Push VLAN is not supported in ingress except for CX6 FDB mode. */
2882         if (attr->transfer) {
2883                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2884                 bool is_cx5 = sh->steering_format_version ==
2885                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2886
2887                 if (!fdb_tx && is_cx5)
2888                         direction_error = true;
2889         } else if (attr->ingress) {
2890                 direction_error = true;
2891         }
2892         if (direction_error)
2893                 return rte_flow_error_set(error, ENOTSUP,
2894                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2895                                           NULL,
2896                                           "push vlan action not supported for ingress");
2897         if (!attr->transfer && priv->representor)
2898                 return rte_flow_error_set(error, ENOTSUP,
2899                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2900                                           "push vlan action for VF representor "
2901                                           "not supported on NIC table");
2902         if (vlan_m &&
2903             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2904             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2905                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2906             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2907             !(mlx5_flow_find_action
2908                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2909                 return rte_flow_error_set(error, EINVAL,
2910                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2911                                           "not full match mask on VLAN PCP and "
2912                                           "there is no of_set_vlan_pcp action, "
2913                                           "push VLAN action cannot figure out "
2914                                           "PCP value");
2915         if (vlan_m &&
2916             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2917             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2918                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2919             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2920             !(mlx5_flow_find_action
2921                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2922                 return rte_flow_error_set(error, EINVAL,
2923                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2924                                           "not full match mask on VLAN VID and "
2925                                           "there is no of_set_vlan_vid action, "
2926                                           "push VLAN action cannot figure out "
2927                                           "VID value");
2929         return 0;
2930 }
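
/*
 * Usage sketch (illustrative only, not part of the driver): when the
 * matched VLAN mask cannot supply full PCP/VID values, the checks above
 * require explicit set actions to follow the push, e.g.:
 *
 *   const struct rte_flow_action_of_push_vlan push = {
 *           .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *   };
 *   const struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
 *   const struct rte_flow_action_of_set_vlan_vid vid = {
 *           .vlan_vid = RTE_BE16(100),
 *   };
 *   const struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *           { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp },
 *           { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */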
2931
2932 /**
2933  * Validate the set VLAN PCP.
2934  *
2935  * @param[in] action_flags
2936  *   Holds the actions detected until now.
2937  * @param[in] actions
2938  *   Pointer to the list of actions remaining in the flow rule.
2939  * @param[out] error
2940  *   Pointer to error structure.
2941  *
2942  * @return
2943  *   0 on success, a negative errno value otherwise and rte_errno is set.
2944  */
2945 static int
2946 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2947                                      const struct rte_flow_action actions[],
2948                                      struct rte_flow_error *error)
2949 {
2950         const struct rte_flow_action *action = actions;
2951         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2952
2953         if (conf->vlan_pcp > 7)
2954                 return rte_flow_error_set(error, EINVAL,
2955                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2956                                           "VLAN PCP value is too big");
2957         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2958                 return rte_flow_error_set(error, ENOTSUP,
2959                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2960                                           "set VLAN PCP action must follow "
2961                                           "the push VLAN action");
2962         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2963                 return rte_flow_error_set(error, ENOTSUP,
2964                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2965                                           "Multiple VLAN PCP modifications are "
2966                                           "not supported");
2967         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2968                 return rte_flow_error_set(error, EINVAL,
2969                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2970                                           "wrong action order, port_id should "
2971                                           "be after set VLAN PCP");
2972         return 0;
2973 }
2974
2975 /**
2976  * Validate the set VLAN VID.
2977  *
2978  * @param[in] item_flags
2979  *   Holds the items detected in this rule.
2980  * @param[in] action_flags
2981  *   Holds the actions detected until now.
2982  * @param[in] actions
2983  *   Pointer to the list of actions remaining in the flow rule.
2984  * @param[out] error
2985  *   Pointer to error structure.
2986  *
2987  * @return
2988  *   0 on success, a negative errno value otherwise and rte_errno is set.
2989  */
2990 static int
2991 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2992                                      uint64_t action_flags,
2993                                      const struct rte_flow_action actions[],
2994                                      struct rte_flow_error *error)
2995 {
2996         const struct rte_flow_action *action = actions;
2997         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2998
2999         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
3000                 return rte_flow_error_set(error, EINVAL,
3001                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3002                                           "VLAN VID value is too big");
3003         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3004             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3005                 return rte_flow_error_set(error, ENOTSUP,
3006                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3007                                           "set VLAN VID action must follow push"
3008                                           " VLAN action or match on VLAN item");
3009         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3010                 return rte_flow_error_set(error, ENOTSUP,
3011                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3012                                           "Multiple VLAN VID modifications are "
3013                                           "not supported");
3014         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3015                 return rte_flow_error_set(error, EINVAL,
3016                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3017                                           "wrong action order, port_id should "
3018                                           "be after set VLAN VID");
3019         return 0;
3020 }
3021
3022 /**
3023  * Validate the FLAG action.
3024  *
3025  * @param[in] dev
3026  *   Pointer to the rte_eth_dev structure.
3027  * @param[in] action_flags
3028  *   Holds the actions detected until now.
3029  * @param[in] attr
3030  *   Pointer to flow attributes
3031  * @param[out] error
3032  *   Pointer to error structure.
3033  *
3034  * @return
3035  *   0 on success, a negative errno value otherwise and rte_errno is set.
3036  */
3037 static int
3038 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3039                              uint64_t action_flags,
3040                              const struct rte_flow_attr *attr,
3041                              struct rte_flow_error *error)
3042 {
3043         struct mlx5_priv *priv = dev->data->dev_private;
3044         struct mlx5_dev_config *config = &priv->config;
3045         int ret;
3046
3047         /* Fall back if no extended metadata register support. */
3048         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3049                 return mlx5_flow_validate_action_flag(action_flags, attr,
3050                                                       error);
3051         /* Extensive metadata mode requires registers. */
3052         if (!mlx5_flow_ext_mreg_supported(dev))
3053                 return rte_flow_error_set(error, ENOTSUP,
3054                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3055                                           "no metadata registers "
3056                                           "to support flag action");
3057         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3058                 return rte_flow_error_set(error, ENOTSUP,
3059                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3060                                           "extended metadata register"
3061                                           " isn't available");
3062         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3063         if (ret < 0)
3064                 return ret;
3065         MLX5_ASSERT(ret > 0);
3066         if (action_flags & MLX5_FLOW_ACTION_MARK)
3067                 return rte_flow_error_set(error, EINVAL,
3068                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3069                                           "can't mark and flag in same flow");
3070         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3071                 return rte_flow_error_set(error, EINVAL,
3072                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3073                                           "can't have 2 flag"
3074                                           " actions in same flow");
3075         return 0;
3076 }
3077
3078 /**
3079  * Validate MARK action.
3080  *
3081  * @param[in] dev
3082  *   Pointer to the rte_eth_dev structure.
3083  * @param[in] action
3084  *   Pointer to action.
3085  * @param[in] action_flags
3086  *   Holds the actions detected until now.
3087  * @param[in] attr
3088  *   Pointer to flow attributes
3089  * @param[out] error
3090  *   Pointer to error structure.
3091  *
3092  * @return
3093  *   0 on success, a negative errno value otherwise and rte_errno is set.
3094  */
3095 static int
3096 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3097                              const struct rte_flow_action *action,
3098                              uint64_t action_flags,
3099                              const struct rte_flow_attr *attr,
3100                              struct rte_flow_error *error)
3101 {
3102         struct mlx5_priv *priv = dev->data->dev_private;
3103         struct mlx5_dev_config *config = &priv->config;
3104         const struct rte_flow_action_mark *mark = action->conf;
3105         int ret;
3106
3107         if (is_tunnel_offload_active(dev))
3108                 return rte_flow_error_set(error, ENOTSUP,
3109                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3110                                           "no mark action "
3111                                           "if tunnel offload active");
3112         /* Fall back if no extended metadata register support. */
3113         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3114                 return mlx5_flow_validate_action_mark(action, action_flags,
3115                                                       attr, error);
3116         /* Extensive metadata mode requires registers. */
3117         if (!mlx5_flow_ext_mreg_supported(dev))
3118                 return rte_flow_error_set(error, ENOTSUP,
3119                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3120                                           "no metadata registers "
3121                                           "to support mark action");
3122         if (!priv->sh->dv_mark_mask)
3123                 return rte_flow_error_set(error, ENOTSUP,
3124                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3125                                           "extended metadata register"
3126                                           " isn't available");
3127         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3128         if (ret < 0)
3129                 return ret;
3130         MLX5_ASSERT(ret > 0);
3131         if (!mark)
3132                 return rte_flow_error_set(error, EINVAL,
3133                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3134                                           "configuration cannot be null");
3135         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3136                 return rte_flow_error_set(error, EINVAL,
3137                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3138                                           &mark->id,
3139                                           "mark id exceeds the limit");
3140         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3141                 return rte_flow_error_set(error, EINVAL,
3142                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3143                                           "can't flag and mark in same flow");
3144         if (action_flags & MLX5_FLOW_ACTION_MARK)
3145                 return rte_flow_error_set(error, EINVAL,
3146                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3147                                           "can't have 2 mark actions in same"
3148                                           " flow");
3149         return 0;
3150 }
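
/*
 * Usage sketch (illustrative only, not part of the driver): FLAG and MARK
 * are mutually exclusive per the checks above; a single MARK whose id is
 * below the device limit is delivered in mbuf->hash.fdir.hi on Rx:
 *
 *   const struct rte_flow_action_mark mark = { .id = 0xbeef };
 *   const struct rte_flow_action_queue queue = { .index = 0 };
 *   const struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */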
3151
3152 /**
3153  * Validate SET_META action.
3154  *
3155  * @param[in] dev
3156  *   Pointer to the rte_eth_dev structure.
3157  * @param[in] action
3158  *   Pointer to the action structure.
3159  * @param[in] action_flags
3160  *   Holds the actions detected until now.
3161  * @param[in] attr
3162  *   Pointer to flow attributes
3163  * @param[out] error
3164  *   Pointer to error structure.
3165  *
3166  * @return
3167  *   0 on success, a negative errno value otherwise and rte_errno is set.
3168  */
3169 static int
3170 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3171                                  const struct rte_flow_action *action,
3172                                  uint64_t action_flags __rte_unused,
3173                                  const struct rte_flow_attr *attr,
3174                                  struct rte_flow_error *error)
3175 {
3176         struct mlx5_priv *priv = dev->data->dev_private;
3177         struct mlx5_dev_config *config = &priv->config;
3178         const struct rte_flow_action_set_meta *conf;
3179         uint32_t nic_mask = UINT32_MAX;
3180         int reg;
3181
3182         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
3183             !mlx5_flow_ext_mreg_supported(dev))
3184                 return rte_flow_error_set(error, ENOTSUP,
3185                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3186                                           "extended metadata register"
3187                                           " isn't supported");
3188         reg = flow_dv_get_metadata_reg(dev, attr, error);
3189         if (reg < 0)
3190                 return reg;
3191         if (reg == REG_NON)
3192                 return rte_flow_error_set(error, ENOTSUP,
3193                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3194                                           "unavailable extended metadata register");
3195         if (reg != REG_A && reg != REG_B)
3196                 nic_mask = priv->sh->dv_meta_mask;
3200         if (!(action->conf))
3201                 return rte_flow_error_set(error, EINVAL,
3202                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3203                                           "configuration cannot be null");
3204         conf = (const struct rte_flow_action_set_meta *)action->conf;
3205         if (!conf->mask)
3206                 return rte_flow_error_set(error, EINVAL,
3207                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3208                                           "zero mask doesn't have any effect");
3209         if (conf->mask & ~nic_mask)
3210                 return rte_flow_error_set(error, EINVAL,
3211                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3212                                           "metadata must be within reg C0");
3213         return 0;
3214 }
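
/*
 * Usage sketch (illustrative only, not part of the driver): a SET_META
 * action passing the checks above carries a non-zero mask that stays
 * within the bits the device can write (register C0 outside legacy
 * mode), e.g. the low 16 bits:
 *
 *   const struct rte_flow_action_set_meta meta = {
 *           .data = 0x1234,
 *           .mask = 0xffff,
 *   };
 *   const struct rte_flow_action action = {
 *           .type = RTE_FLOW_ACTION_TYPE_SET_META,
 *           .conf = &meta,
 *   };
 */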
3215
3216 /**
3217  * Validate SET_TAG action.
3218  *
3219  * @param[in] dev
3220  *   Pointer to the rte_eth_dev structure.
3221  * @param[in] action
3222  *   Pointer to the action structure.
3223  * @param[in] action_flags
3224  *   Holds the actions detected until now.
3225  * @param[in] attr
3226  *   Pointer to flow attributes
3227  * @param[out] error
3228  *   Pointer to error structure.
3229  *
3230  * @return
3231  *   0 on success, a negative errno value otherwise and rte_errno is set.
3232  */
3233 static int
3234 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3235                                 const struct rte_flow_action *action,
3236                                 uint64_t action_flags,
3237                                 const struct rte_flow_attr *attr,
3238                                 struct rte_flow_error *error)
3239 {
3240         const struct rte_flow_action_set_tag *conf;
3241         const uint64_t terminal_action_flags =
3242                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3243                 MLX5_FLOW_ACTION_RSS;
3244         int ret;
3245
3246         if (!mlx5_flow_ext_mreg_supported(dev))
3247                 return rte_flow_error_set(error, ENOTSUP,
3248                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3249                                           "extensive metadata register"
3250                                           " isn't supported");
3251         if (!(action->conf))
3252                 return rte_flow_error_set(error, EINVAL,
3253                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3254                                           "configuration cannot be null");
3255         conf = (const struct rte_flow_action_set_tag *)action->conf;
3256         if (!conf->mask)
3257                 return rte_flow_error_set(error, EINVAL,
3258                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3259                                           "zero mask doesn't have any effect");
3260         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3261         if (ret < 0)
3262                 return ret;
3263         if (!attr->transfer && attr->ingress &&
3264             (action_flags & terminal_action_flags))
3265                 return rte_flow_error_set(error, EINVAL,
3266                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3267                                           "set_tag has no effect"
3268                                           " with terminal actions");
3269         return 0;
3270 }
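
/*
 * Usage sketch (illustrative only, not part of the driver): SET_TAG
 * writes an application tag register; the index is translated and
 * validated through mlx5_flow_get_reg_id() above:
 *
 *   const struct rte_flow_action_set_tag tag = {
 *           .index = 0,
 *           .data = 0xabcd,
 *           .mask = 0xffff,
 *   };
 *   const struct rte_flow_action action = {
 *           .type = RTE_FLOW_ACTION_TYPE_SET_TAG,
 *           .conf = &tag,
 *   };
 */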
3271
3272 /**
3273  * Validate count action.
3274  *
3275  * @param[in] dev
3276  *   Pointer to rte_eth_dev structure.
3277  * @param[in] shared
3278  *   Indicator if action is shared.
3279  * @param[in] action_flags
3280  *   Holds the actions detected until now.
3281  * @param[out] error
3282  *   Pointer to error structure.
3283  *
3284  * @return
3285  *   0 on success, a negative errno value otherwise and rte_errno is set.
3286  */
3287 static int
3288 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3289                               uint64_t action_flags,
3290                               struct rte_flow_error *error)
3291 {
3292         struct mlx5_priv *priv = dev->data->dev_private;
3293
3294         if (!priv->sh->devx)
3295                 goto notsup_err;
3296         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3297                 return rte_flow_error_set(error, EINVAL,
3298                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3299                                           "duplicate count actions set");
3300         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3301             !priv->sh->flow_hit_aso_en)
3302                 return rte_flow_error_set(error, EINVAL,
3303                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3304                                           "old age and shared count combination is not supported");
3305 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3306         return 0;
3307 #endif
3308 notsup_err:
3309         return rte_flow_error_set
3310                       (error, ENOTSUP,
3311                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3312                        NULL,
3313                        "count action not supported");
3314 }
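
/*
 * Usage sketch (illustrative only, not part of the driver): a validated
 * counter can later be read back through the generic query API; port_id
 * and flow stand for the application's port and rule handles:
 *
 *   const struct rte_flow_action_count count = { .id = 0 };
 *   const struct rte_flow_action count_action = {
 *           .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *           .conf = &count,
 *   };
 *   struct rte_flow_query_count stats = { .reset = 0 };
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_query(port_id, flow, &count_action, &stats, &err) == 0)
 *           printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                  stats.hits, stats.bytes);
 */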
3315
3316 /**
3317  * Validate the L2 encap action.
3318  *
3319  * @param[in] dev
3320  *   Pointer to the rte_eth_dev structure.
3321  * @param[in] action_flags
3322  *   Holds the actions detected until now.
3323  * @param[in] action
3324  *   Pointer to the action structure.
3325  * @param[in] attr
3326  *   Pointer to flow attributes.
3327  * @param[out] error
3328  *   Pointer to error structure.
3329  *
3330  * @return
3331  *   0 on success, a negative errno value otherwise and rte_errno is set.
3332  */
3333 static int
3334 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3335                                  uint64_t action_flags,
3336                                  const struct rte_flow_action *action,
3337                                  const struct rte_flow_attr *attr,
3338                                  struct rte_flow_error *error)
3339 {
3340         const struct mlx5_priv *priv = dev->data->dev_private;
3341
3342         if (!(action->conf))
3343                 return rte_flow_error_set(error, EINVAL,
3344                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3345                                           "configuration cannot be null");
3346         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3347                 return rte_flow_error_set(error, EINVAL,
3348                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3349                                           "can only have a single encap action "
3350                                           "in a flow");
3351         if (!attr->transfer && priv->representor)
3352                 return rte_flow_error_set(error, ENOTSUP,
3353                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3354                                           "encap action for VF representor "
3355                                           "not supported on NIC table");
3356         return 0;
3357 }
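
/*
 * Usage sketch (illustrative only, not part of the driver): only one
 * encap action may appear in a flow; for VXLAN the new headers are
 * described as an item list, with outer_eth/outer_ipv4/outer_udp/vni
 * being application-filled header specs:
 *
 *   struct rte_flow_item vxlan_def[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &outer_eth },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &outer_ipv4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &outer_udp },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vni },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   const struct rte_flow_action_vxlan_encap encap = {
 *           .definition = vxlan_def,
 *   };
 */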
3358
3359 /**
3360  * Validate a decap action.
3361  *
3362  * @param[in] dev
3363  *   Pointer to the rte_eth_dev structure.
3364  * @param[in] action_flags
3365  *   Holds the actions detected until now.
3366  * @param[in] action
3367  *   Pointer to the action structure.
3368  * @param[in] item_flags
3369  *   Holds the items detected.
3370  * @param[in] attr
3371  *   Pointer to flow attributes
3372  * @param[out] error
3373  *   Pointer to error structure.
3374  *
3375  * @return
3376  *   0 on success, a negative errno value otherwise and rte_errno is set.
3377  */
3378 static int
3379 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3380                               uint64_t action_flags,
3381                               const struct rte_flow_action *action,
3382                               const uint64_t item_flags,
3383                               const struct rte_flow_attr *attr,
3384                               struct rte_flow_error *error)
3385 {
3386         const struct mlx5_priv *priv = dev->data->dev_private;
3387
3388         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3389             !priv->config.decap_en)
3390                 return rte_flow_error_set(error, ENOTSUP,
3391                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3392                                           "decap is not enabled");
3393         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3394                 return rte_flow_error_set(error, ENOTSUP,
3395                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3396                                           action_flags &
3397                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3398                                           "have a single decap action" : "decap "
3399                                           "after encap is not supported");
3400         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3401                 return rte_flow_error_set(error, EINVAL,
3402                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3403                                           "can't have decap action after"
3404                                           " modify action");
3405         if (attr->egress)
3406                 return rte_flow_error_set(error, ENOTSUP,
3407                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3408                                           NULL,
3409                                           "decap action not supported for "
3410                                           "egress");
3411         if (!attr->transfer && priv->representor)
3412                 return rte_flow_error_set(error, ENOTSUP,
3413                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3414                                           "decap action for VF representor "
3415                                           "not supported on NIC table");
3416         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3417             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3418                 return rte_flow_error_set(error, ENOTSUP,
3419                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3420                                 "VXLAN item should be present for VXLAN decap");
3421         return 0;
3422 }
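
/*
 * Usage sketch (illustrative only, not part of the driver): per the last
 * check above, VXLAN decap must be preceded by a VXLAN match in the
 * pattern:
 *
 *   const struct rte_flow_action_queue queue = { .index = 0 };
 *   const struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   const struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */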
3423
3424 const struct rte_flow_action_raw_decap empty_decap = { .data = NULL, .size = 0 };
3425
3426 /**
3427  * Validate the raw encap and decap actions.
3428  *
3429  * @param[in] dev
3430  *   Pointer to the rte_eth_dev structure.
3431  * @param[in] decap
3432  *   Pointer to the decap action.
3433  * @param[in] encap
3434  *   Pointer to the encap action.
3435  * @param[in] attr
3436  *   Pointer to flow attributes
3437  * @param[in, out] action_flags
3438  *   Holds the actions detected until now.
3439  * @param[out] actions_n
3440  *   pointer to the number of actions counter.
3441  * @param[in] action
3442  *   Pointer to the action structure.
3443  * @param[in] item_flags
3444  *   Holds the items detected.
3445  * @param[out] error
3446  *   Pointer to error structure.
3447  *
3448  * @return
3449  *   0 on success, a negative errno value otherwise and rte_errno is set.
3450  */
3451 static int
3452 flow_dv_validate_action_raw_encap_decap
3453         (struct rte_eth_dev *dev,
3454          const struct rte_flow_action_raw_decap *decap,
3455          const struct rte_flow_action_raw_encap *encap,
3456          const struct rte_flow_attr *attr, uint64_t *action_flags,
3457          int *actions_n, const struct rte_flow_action *action,
3458          uint64_t item_flags, struct rte_flow_error *error)
3459 {
3460         const struct mlx5_priv *priv = dev->data->dev_private;
3461         int ret;
3462
3463         if (encap && (!encap->size || !encap->data))
3464                 return rte_flow_error_set(error, EINVAL,
3465                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3466                                           "raw encap data cannot be empty");
3467         if (decap && encap) {
3468                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3469                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3470                         /* L3 encap. */
3471                         decap = NULL;
3472                 else if (encap->size <=
3473                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3474                            decap->size >
3475                            MLX5_ENCAPSULATION_DECISION_SIZE)
3476                         /* L3 decap. */
3477                         encap = NULL;
3478                 else if (encap->size >
3479                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3480                            decap->size >
3481                            MLX5_ENCAPSULATION_DECISION_SIZE)
3482                         /* 2 L2 actions: encap and decap. */
3483                         ;
3484                 else
3485                         return rte_flow_error_set(error,
3486                                 ENOTSUP,
3487                                 RTE_FLOW_ERROR_TYPE_ACTION,
3488                                 NULL, "unsupported combination: raw "
3489                                 "decap and raw encap sizes are "
3490                                 "both too small");
3491         }
3492         if (decap) {
3493                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3494                                                     item_flags, attr, error);
3495                 if (ret < 0)
3496                         return ret;
3497                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3498                 ++(*actions_n);
3499         }
3500         if (encap) {
3501                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3502                         return rte_flow_error_set(error, ENOTSUP,
3503                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3504                                                   NULL,
3505                                                   "small raw encap size");
3506                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3507                         return rte_flow_error_set(error, EINVAL,
3508                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3509                                                   NULL,
3510                                                   "more than one encap action");
3511                 if (!attr->transfer && priv->representor)
3512                         return rte_flow_error_set
3513                                         (error, ENOTSUP,
3514                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3515                                          "encap action for VF representor "
3516                                          "not supported on NIC table");
3517                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3518                 ++(*actions_n);
3519         }
3520         return 0;
3521 }
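
/*
 * Usage sketch (illustrative only, not part of the driver): a raw decap
 * larger than MLX5_ENCAPSULATION_DECISION_SIZE paired with a raw encap
 * at or below it is classified as L3 decap above, e.g. stripping a full
 * tunnel header stack while rebuilding only an L2 header. The buffers
 * and sizes below are application-provided placeholders:
 *
 *   Decap of the whole tunnel stack (size above the decision size):
 *   const struct rte_flow_action_raw_decap raw_decap = {
 *           .data = tunnel_hdr_buf,
 *           .size = tunnel_hdr_size,
 *   };
 *   Re-encap of a bare Ethernet header (size at or below it):
 *   const struct rte_flow_action_raw_encap raw_encap = {
 *           .data = l2_hdr_buf,
 *           .size = l2_hdr_size,
 *   };
 */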
3522
3523 /**
3524  * Validate the ASO CT action.
3525  *
3526  * @param[in] dev
3527  *   Pointer to the rte_eth_dev structure.
3528  * @param[in] action_flags
3529  *   Holds the actions detected until now.
3530  * @param[in] item_flags
3531  *   The items found in this flow rule.
3532  * @param[in] attr
3533  *   Pointer to flow attributes.
3534  * @param[out] error
3535  *   Pointer to error structure.
3536  *
3537  * @return
3538  *   0 on success, a negative errno value otherwise and rte_errno is set.
3539  */
3540 static int
3541 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3542                                uint64_t action_flags,
3543                                uint64_t item_flags,
3544                                const struct rte_flow_attr *attr,
3545                                struct rte_flow_error *error)
3546 {
3547         RTE_SET_USED(dev);
3548
3549         if (attr->group == 0 && !attr->transfer)
3550                 return rte_flow_error_set(error, ENOTSUP,
3551                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3552                                           NULL,
3553                                           "Only support non-root table");
3554         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3555                 return rte_flow_error_set(error, ENOTSUP,
3556                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3557                                           "CT cannot follow a fate action");
3558         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3559             (action_flags & MLX5_FLOW_ACTION_AGE))
3560                 return rte_flow_error_set(error, EINVAL,
3561                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3562                                           "Only one ASO action is supported");
3563         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3564                 return rte_flow_error_set(error, EINVAL,
3565                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3566                                           "Encap cannot exist before CT");
3567         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3568                 return rte_flow_error_set(error, EINVAL,
3569                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3570                                           "Not an outer TCP packet");
3571         return 0;
3572 }
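
/*
 * Usage sketch (illustrative only, not part of the driver): the checks
 * above require a non-root group, an outer TCP match and no earlier
 * fate/ASO/encap action; ct_conf stands for a conntrack context the
 * application prepared beforehand, typically via the indirect action API:
 *
 *   const struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
 *   const struct rte_flow_action_queue queue = { .index = 0 };
 *   const struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_TCP },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   const struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_CONNTRACK, .conf = ct_conf },
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */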
3573
3574 int
3575 flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
3576                              struct mlx5_list_entry *entry, void *cb_ctx)
3577 {
3578         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3579         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3580         struct mlx5_flow_dv_encap_decap_resource *resource;
3581
3582         resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
3583                                 entry);
3584         if (resource->reformat_type == ctx_resource->reformat_type &&
3585             resource->ft_type == ctx_resource->ft_type &&
3586             resource->flags == ctx_resource->flags &&
3587             resource->size == ctx_resource->size &&
3588             !memcmp((const void *)resource->buf,
3589                     (const void *)ctx_resource->buf,
3590                     resource->size))
3591                 return 0;
3592         return -1;
3593 }
3594
3595 struct mlx5_list_entry *
3596 flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
3597 {
3598         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3599         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3600         struct mlx5dv_dr_domain *domain;
3601         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3602         struct mlx5_flow_dv_encap_decap_resource *resource;
3603         uint32_t idx;
3604         int ret;
3605
3606         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3607                 domain = sh->fdb_domain;
3608         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3609                 domain = sh->rx_domain;
3610         else
3611                 domain = sh->tx_domain;
3612         /* Register new encap/decap resource. */
3613         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
3614         if (!resource) {
3615                 rte_flow_error_set(ctx->error, ENOMEM,
3616                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3617                                    "cannot allocate resource memory");
3618                 return NULL;
3619         }
3620         *resource = *ctx_resource;
3621         resource->idx = idx;
3622         ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->cdev->ctx,
3623                                                               domain, resource,
3624                                                              &resource->action);
3625         if (ret) {
3626                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3627                 rte_flow_error_set(ctx->error, ENOMEM,
3628                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3629                                    NULL, "cannot create action");
3630                 return NULL;
3631         }
3632
3633         return &resource->entry;
3634 }
3635
3636 struct mlx5_list_entry *
3637 flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
3638                              void *cb_ctx)
3639 {
3640         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3641         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3642         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3643         uint32_t idx;
3644
3645         cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3646                                            &idx);
3647         if (!cache_resource) {
3648                 rte_flow_error_set(ctx->error, ENOMEM,
3649                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3650                                    "cannot allocate resource memory");
3651                 return NULL;
3652         }
3653         memcpy(cache_resource, oentry, sizeof(*cache_resource));
3654         cache_resource->idx = idx;
3655         return &cache_resource->entry;
3656 }
3657
3658 void
3659 flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3660 {
3661         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3662         struct mlx5_flow_dv_encap_decap_resource *res =
3663                                        container_of(entry, typeof(*res), entry);
3664
3665         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
3666 }
3667
3668 /**
3669  * Find existing encap/decap resource or create and register a new one.
3670  *
3671  * @param[in, out] dev
3672  *   Pointer to rte_eth_dev structure.
3673  * @param[in, out] resource
3674  *   Pointer to encap/decap resource.
3675  * @param[in, out] dev_flow
3676  *   Pointer to the dev_flow.
3677  * @param[out] error
3678  *   Pointer to error structure.
3679  *
3680  * @return
3681  *   0 on success, otherwise -errno and errno is set.
3682  */
3683 static int
3684 flow_dv_encap_decap_resource_register
3685                         (struct rte_eth_dev *dev,
3686                          struct mlx5_flow_dv_encap_decap_resource *resource,
3687                          struct mlx5_flow *dev_flow,
3688                          struct rte_flow_error *error)
3689 {
3690         struct mlx5_priv *priv = dev->data->dev_private;
3691         struct mlx5_dev_ctx_shared *sh = priv->sh;
3692         struct mlx5_list_entry *entry;
3693         union {
3694                 struct {
3695                         uint32_t ft_type:8;
3696                         uint32_t refmt_type:8;
3697                         /*
3698                          * Header reformat actions can be shared between
3699                          * non-root tables. One bit indicates whether the
3700                          * table is non-root.
3701                          */
3702                         uint32_t is_root:1;
3703                         uint32_t reserve:15;
3704                 };
3705                 uint32_t v32;
3706         } encap_decap_key = {
3707                 {
3708                         .ft_type = resource->ft_type,
3709                         .refmt_type = resource->reformat_type,
3710                         .is_root = !!dev_flow->dv.group,
3711                         .reserve = 0,
3712                 }
3713         };
3714         struct mlx5_flow_cb_ctx ctx = {
3715                 .error = error,
3716                 .data = resource,
3717         };
3718         struct mlx5_hlist *encaps_decaps;
3719         uint64_t key64;
3720
3721         encaps_decaps = flow_dv_hlist_prepare(sh, &sh->encaps_decaps,
3722                                 "encaps_decaps",
3723                                 MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
3724                                 true, true, sh,
3725                                 flow_dv_encap_decap_create_cb,
3726                                 flow_dv_encap_decap_match_cb,
3727                                 flow_dv_encap_decap_remove_cb,
3728                                 flow_dv_encap_decap_clone_cb,
3729                                 flow_dv_encap_decap_clone_free_cb);
3730         if (unlikely(!encaps_decaps))
3731                 return -rte_errno;
3732         resource->flags = dev_flow->dv.group ? 0 : 1;
3733         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3734                                 sizeof(encap_decap_key.v32), 0);
3735         if (resource->reformat_type !=
3736             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3737             resource->size)
3738                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3739         entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
3740         if (!entry)
3741                 return -rte_errno;
3742         resource = container_of(entry, typeof(*resource), entry);
3743         dev_flow->dv.encap_decap = resource;
3744         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3745         return 0;
3746 }
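
/*
 * Note (editorial): registration funnels through a shared hash list keyed
 * by the checksum computed above, so flows requesting an identical
 * reformat (same table type, reformat type, root flag and header bytes)
 * reuse one cached action instead of creating a new device object per
 * flow.
 */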
3747
3748 /**
3749  * Find existing table jump resource or create and register a new one.
3750  *
3751  * @param[in, out] dev
3752  *   Pointer to rte_eth_dev structure.
3753  * @param[in, out] tbl
3754  *   Pointer to flow table resource.
3755  * @param[in, out] dev_flow
3756  *   Pointer to the dev_flow.
3757  * @param[out] error
3758  *   Pointer to error structure.
3759  *
3760  * @return
3761  *   0 on success, otherwise -errno and errno is set.
3762  */
3763 static int
3764 flow_dv_jump_tbl_resource_register
3765                         (struct rte_eth_dev *dev __rte_unused,
3766                          struct mlx5_flow_tbl_resource *tbl,
3767                          struct mlx5_flow *dev_flow,
3768                          struct rte_flow_error *error __rte_unused)
3769 {
3770         struct mlx5_flow_tbl_data_entry *tbl_data =
3771                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3772
3773         MLX5_ASSERT(tbl);
3774         MLX5_ASSERT(tbl_data->jump.action);
3775         dev_flow->handle->rix_jump = tbl_data->idx;
3776         dev_flow->dv.jump = &tbl_data->jump;
3777         return 0;
3778 }
3779
3780 int
3781 flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
3782                          struct mlx5_list_entry *entry, void *cb_ctx)
3783 {
3784         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3785         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3786         struct mlx5_flow_dv_port_id_action_resource *res =
3787                                        container_of(entry, typeof(*res), entry);
3788
3789         return ref->port_id != res->port_id;
3790 }
3791
3792 struct mlx5_list_entry *
3793 flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
3794 {
3795         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3796         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3797         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3798         struct mlx5_flow_dv_port_id_action_resource *resource;
3799         uint32_t idx;
3800         int ret;
3801
3802         /* Register new port id action resource. */
3803         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3804         if (!resource) {
3805                 rte_flow_error_set(ctx->error, ENOMEM,
3806                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3807                                    "cannot allocate port_id action memory");
3808                 return NULL;
3809         }
3810         *resource = *ref;
3811         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3812                                                         ref->port_id,
3813                                                         &resource->action);
3814         if (ret) {
3815                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3816                 rte_flow_error_set(ctx->error, ENOMEM,
3817                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3818                                    "cannot create action");
3819                 return NULL;
3820         }
3821         resource->idx = idx;
3822         return &resource->entry;
3823 }
3824
3825 struct mlx5_list_entry *
3826 flow_dv_port_id_clone_cb(void *tool_ctx,
3827                          struct mlx5_list_entry *entry __rte_unused,
3828                          void *cb_ctx)
3829 {
3830         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3831         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3832         struct mlx5_flow_dv_port_id_action_resource *resource;
3833         uint32_t idx;
3834
3835         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3836         if (!resource) {
3837                 rte_flow_error_set(ctx->error, ENOMEM,
3838                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3839                                    "cannot allocate port_id action memory");
3840                 return NULL;
3841         }
3842         memcpy(resource, entry, sizeof(*resource));
3843         resource->idx = idx;
3844         return &resource->entry;
3845 }
3846
3847 void
3848 flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3849 {
3850         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3851         struct mlx5_flow_dv_port_id_action_resource *resource =
3852                                   container_of(entry, typeof(*resource), entry);
3853
3854         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3855 }
3856
3857 /**
3858  * Find existing port ID action resource or create and register a new one.
3859  *
3860  * @param[in, out] dev
3861  *   Pointer to rte_eth_dev structure.
3862  * @param[in, out] ref
3863  *   Pointer to port ID action resource reference.
3864  * @param[in, out] dev_flow
3865  *   Pointer to the dev_flow.
3866  * @param[out] error
3867  *   Pointer to error structure.
3868  *
3869  * @return
3870  *   0 on success, otherwise -errno and errno is set.
3871  */
3872 static int
3873 flow_dv_port_id_action_resource_register
3874                         (struct rte_eth_dev *dev,
3875                          struct mlx5_flow_dv_port_id_action_resource *ref,
3876                          struct mlx5_flow *dev_flow,
3877                          struct rte_flow_error *error)
3878 {
3879         struct mlx5_priv *priv = dev->data->dev_private;
3880         struct mlx5_list_entry *entry;
3881         struct mlx5_flow_dv_port_id_action_resource *resource;
3882         struct mlx5_flow_cb_ctx ctx = {
3883                 .error = error,
3884                 .data = ref,
3885         };
3886
3887         entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
3888         if (!entry)
3889                 return -rte_errno;
3890         resource = container_of(entry, typeof(*resource), entry);
3891         dev_flow->dv.port_id_action = resource;
3892         dev_flow->handle->rix_port_id_action = resource->idx;
3893         return 0;
3894 }
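
/*
 * Note (editorial): port ID actions are deduplicated by the target
 * port_id alone (see flow_dv_port_id_match_cb() above), so all flows
 * steering to the same E-Switch port end up sharing a single
 * destination-port action.
 */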
3895
3896 int
3897 flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
3898                            struct mlx5_list_entry *entry, void *cb_ctx)
3899 {
3900         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3901         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3902         struct mlx5_flow_dv_push_vlan_action_resource *res =
3903                                        container_of(entry, typeof(*res), entry);
3904
3905         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3906 }
3907
3908 struct mlx5_list_entry *
3909 flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
3910 {
3911         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3912         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3913         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3914         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3915         struct mlx5dv_dr_domain *domain;
3916         uint32_t idx;
3917         int ret;
3918
3919         /* Register new push VLAN action resource. */
3920         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3921         if (!resource) {
3922                 rte_flow_error_set(ctx->error, ENOMEM,
3923                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3924                                    "cannot allocate push_vlan action memory");
3925                 return NULL;
3926         }
3927         *resource = *ref;
3928         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3929                 domain = sh->fdb_domain;
3930         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3931                 domain = sh->rx_domain;
3932         else
3933                 domain = sh->tx_domain;
3934         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3935                                                         &resource->action);
3936         if (ret) {
3937                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3938                 rte_flow_error_set(ctx->error, ENOMEM,
3939                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3940                                    "cannot create push vlan action");
3941                 return NULL;
3942         }
3943         resource->idx = idx;
3944         return &resource->entry;
3945 }
3946
3947 struct mlx5_list_entry *
3948 flow_dv_push_vlan_clone_cb(void *tool_ctx,
3949                            struct mlx5_list_entry *entry __rte_unused,
3950                            void *cb_ctx)
3951 {
3952         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3953         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3954         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3955         uint32_t idx;
3956
3957         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3958         if (!resource) {
3959                 rte_flow_error_set(ctx->error, ENOMEM,
3960                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3961                                    "cannot allocate push_vlan action memory");
3962                 return NULL;
3963         }
3964         memcpy(resource, entry, sizeof(*resource));
3965         resource->idx = idx;
3966         return &resource->entry;
3967 }
3968
3969 void
3970 flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3971 {
3972         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3973         struct mlx5_flow_dv_push_vlan_action_resource *resource =
3974                                   container_of(entry, typeof(*resource), entry);
3975
3976         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
3977 }
3978
3979 /**
3980  * Find existing push VLAN action resource or create and register a new one.
3981  *
3982  * @param[in, out] dev
3983  *   Pointer to rte_eth_dev structure.
3984  * @param[in, out] ref
3985  *   Pointer to push VLAN action resource reference.
3986  * @param[in, out] dev_flow
3987  *   Pointer to the dev_flow.
3988  * @param[out] error
3989  *   Pointer to error structure.
3990  *
3991  * @return
3992  *   0 on success, otherwise -errno and errno is set.
3993  */
3994 static int
3995 flow_dv_push_vlan_action_resource_register
3996                        (struct rte_eth_dev *dev,
3997                         struct mlx5_flow_dv_push_vlan_action_resource *ref,
3998                         struct mlx5_flow *dev_flow,
3999                         struct rte_flow_error *error)
4000 {
4001         struct mlx5_priv *priv = dev->data->dev_private;
4002         struct mlx5_flow_dv_push_vlan_action_resource *resource;
4003         struct mlx5_list_entry *entry;
4004         struct mlx5_flow_cb_ctx ctx = {
4005                 .error = error,
4006                 .data = ref,
4007         };
4008
4009         entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
4010         if (!entry)
4011                 return -rte_errno;
4012         resource = container_of(entry, typeof(*resource), entry);
4013
4014         dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4015         dev_flow->dv.push_vlan_res = resource;
4016         return 0;
4017 }
4018
4019 /**
4020  * Get the header size of a specific rte_flow_item_type.
4021  *
4022  * @param[in] item_type
4023  *   Tested rte_flow_item_type.
4024  *
4025  * @return
4026  *   Size of the item type's header struct, 0 if void or irrelevant.
4027  */
4028 static size_t
4029 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4030 {
4031         size_t retval;
4032
4033         switch (item_type) {
4034         case RTE_FLOW_ITEM_TYPE_ETH:
4035                 retval = sizeof(struct rte_ether_hdr);
4036                 break;
4037         case RTE_FLOW_ITEM_TYPE_VLAN:
4038                 retval = sizeof(struct rte_vlan_hdr);
4039                 break;
4040         case RTE_FLOW_ITEM_TYPE_IPV4:
4041                 retval = sizeof(struct rte_ipv4_hdr);
4042                 break;
4043         case RTE_FLOW_ITEM_TYPE_IPV6:
4044                 retval = sizeof(struct rte_ipv6_hdr);
4045                 break;
4046         case RTE_FLOW_ITEM_TYPE_UDP:
4047                 retval = sizeof(struct rte_udp_hdr);
4048                 break;
4049         case RTE_FLOW_ITEM_TYPE_TCP:
4050                 retval = sizeof(struct rte_tcp_hdr);
4051                 break;
4052         case RTE_FLOW_ITEM_TYPE_VXLAN:
4053         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4054                 retval = sizeof(struct rte_vxlan_hdr);
4055                 break;
4056         case RTE_FLOW_ITEM_TYPE_GRE:
4057         case RTE_FLOW_ITEM_TYPE_NVGRE:
4058                 retval = sizeof(struct rte_gre_hdr);
4059                 break;
4060         case RTE_FLOW_ITEM_TYPE_MPLS:
4061                 retval = sizeof(struct rte_mpls_hdr);
4062                 break;
4063         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4064         default:
4065                 retval = 0;
4066                 break;
4067         }
4068         return retval;
4069 }
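
/*
 * Illustrative sketch (not part of the driver; the helper name is made
 * up): the function above can be used to pre-compute the total header
 * size of an END-terminated item list, e.g. to check it against
 * MLX5_ENCAP_MAX_LEN before copying the headers into a reformat buffer.
 */
static size_t __rte_unused
flow_dv_example_items_hdr_len(const struct rte_flow_item *items)
{
        size_t total = 0;

        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
                total += flow_dv_get_item_hdr_len(items->type);
        return total;
}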
4070
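/*
 * Default header field values used by flow_dv_convert_encap_data() below
 * when the application leaves the corresponding spec field zeroed.
 */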
4071 #define MLX5_ENCAP_IPV4_VERSION         0x40
4072 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
4073 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
4074 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
4075 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
4076 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
4077 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
4078
4079 /**
4080  * Convert the encap action data from a rte_flow_item list to a raw buffer.
4081  *
4082  * @param[in] items
4083  *   Pointer to rte_flow_item objects list.
4084  * @param[out] buf
4085  *   Pointer to the output buffer.
4086  * @param[out] size
4087  *   Pointer to the output buffer size.
4088  * @param[out] error
4089  *   Pointer to the error structure.
4090  *
4091  * @return
4092  *   0 on success, a negative errno value otherwise and rte_errno is set.
4093  */
4094 static int
4095 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4096                            size_t *size, struct rte_flow_error *error)
4097 {
4098         struct rte_ether_hdr *eth = NULL;
4099         struct rte_vlan_hdr *vlan = NULL;
4100         struct rte_ipv4_hdr *ipv4 = NULL;
4101         struct rte_ipv6_hdr *ipv6 = NULL;
4102         struct rte_udp_hdr *udp = NULL;
4103         struct rte_vxlan_hdr *vxlan = NULL;
4104         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4105         struct rte_gre_hdr *gre = NULL;
4106         size_t len;
4107         size_t temp_size = 0;
4108
4109         if (!items)
4110                 return rte_flow_error_set(error, EINVAL,
4111                                           RTE_FLOW_ERROR_TYPE_ACTION,
4112                                           NULL, "invalid empty data");
4113         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4114                 len = flow_dv_get_item_hdr_len(items->type);
4115                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4116                         return rte_flow_error_set(error, EINVAL,
4117                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4118                                                   (void *)items->type,
4119                                                   "items total size is too big"
4120                                                   " for encap action");
4121                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4122                 switch (items->type) {
4123                 case RTE_FLOW_ITEM_TYPE_ETH:
4124                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4125                         break;
4126                 case RTE_FLOW_ITEM_TYPE_VLAN:
4127                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4128                         if (!eth)
4129                                 return rte_flow_error_set(error, EINVAL,
4130                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4131                                                 (void *)items->type,
4132                                                 "eth header not found");
4133                         if (!eth->ether_type)
4134                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4135                         break;
4136                 case RTE_FLOW_ITEM_TYPE_IPV4:
4137                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4138                         if (!vlan && !eth)
4139                                 return rte_flow_error_set(error, EINVAL,
4140                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4141                                                 (void *)items->type,
4142                                                 "neither eth nor vlan"
4143                                                 " header found");
4144                         if (vlan && !vlan->eth_proto)
4145                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4146                         else if (eth && !eth->ether_type)
4147                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4148                         if (!ipv4->version_ihl)
4149                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4150                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4151                         if (!ipv4->time_to_live)
4152                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4153                         break;
4154                 case RTE_FLOW_ITEM_TYPE_IPV6:
4155                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4156                         if (!vlan && !eth)
4157                                 return rte_flow_error_set(error, EINVAL,
4158                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4159                                                 (void *)items->type,
4160                                                 "neither eth nor vlan"
4161                                                 " header found");
4162                         if (vlan && !vlan->eth_proto)
4163                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4164                         else if (eth && !eth->ether_type)
4165                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4166                         if (!ipv6->vtc_flow)
4167                                 ipv6->vtc_flow =
4168                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4169                         if (!ipv6->hop_limits)
4170                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4171                         break;
4172                 case RTE_FLOW_ITEM_TYPE_UDP:
4173                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4174                         if (!ipv4 && !ipv6)
4175                                 return rte_flow_error_set(error, EINVAL,
4176                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4177                                                 (void *)items->type,
4178                                                 "ip header not found");
4179                         if (ipv4 && !ipv4->next_proto_id)
4180                                 ipv4->next_proto_id = IPPROTO_UDP;
4181                         else if (ipv6 && !ipv6->proto)
4182                                 ipv6->proto = IPPROTO_UDP;
4183                         break;
4184                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4185                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4186                         if (!udp)
4187                                 return rte_flow_error_set(error, EINVAL,
4188                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4189                                                 (void *)items->type,
4190                                                 "udp header not found");
4191                         if (!udp->dst_port)
4192                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4193                         if (!vxlan->vx_flags)
4194                                 vxlan->vx_flags =
4195                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4196                         break;
4197                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4198                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4199                         if (!udp)
4200                                 return rte_flow_error_set(error, EINVAL,
4201                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4202                                                 (void *)items->type,
4203                                                 "udp header not found");
4204                         if (!vxlan_gpe->proto)
4205                                 return rte_flow_error_set(error, EINVAL,
4206                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4207                                                 (void *)items->type,
4208                                                 "next protocol not found");
4209                         if (!udp->dst_port)
4210                                 udp->dst_port =
4211                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4212                         if (!vxlan_gpe->vx_flags)
4213                                 vxlan_gpe->vx_flags =
4214                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4215                         break;
4216                 case RTE_FLOW_ITEM_TYPE_GRE:
4217                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4218                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4219                         if (!gre->proto)
4220                                 return rte_flow_error_set(error, EINVAL,
4221                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4222                                                 (void *)items->type,
4223                                                 "next protocol not found");
4224                         if (!ipv4 && !ipv6)
4225                                 return rte_flow_error_set(error, EINVAL,
4226                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4227                                                 (void *)items->type,
4228                                                 "ip header not found");
4229                         if (ipv4 && !ipv4->next_proto_id)
4230                                 ipv4->next_proto_id = IPPROTO_GRE;
4231                         else if (ipv6 && !ipv6->proto)
4232                                 ipv6->proto = IPPROTO_GRE;
4233                         break;
4234                 case RTE_FLOW_ITEM_TYPE_VOID:
4235                         break;
4236                 default:
4237                         return rte_flow_error_set(error, EINVAL,
4238                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4239                                                   (void *)items->type,
4240                                                   "unsupported item type");
4241                         break;
4242                 }
4243                 temp_size += len;
4244         }
4245         *size = temp_size;
4246         return 0;
4247 }
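
/*
 * Usage sketch (illustrative only; the function name, item specs and VNI
 * value are made up): converting a minimal ETH/IPv4/UDP/VXLAN definition
 * into a raw reformat buffer of at least MLX5_ENCAP_MAX_LEN bytes.
 * Fields left zero in the specs (EtherType, IP protocol, UDP destination
 * port, VXLAN flags) are filled with the defaults above.
 */
static int __rte_unused
flow_dv_example_convert_vxlan(uint8_t *buf, size_t *size,
                              struct rte_flow_error *error)
{
        const struct rte_flow_item_eth eth = { .type = 0 };
        const struct rte_flow_item_ipv4 ipv4 = { .hdr.time_to_live = 0 };
        const struct rte_flow_item_udp udp = { .hdr.dst_port = 0 };
        const struct rte_flow_item_vxlan vxlan = { .vni = { 0, 0, 42 } };
        const struct rte_flow_item items[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
                { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
                { .type = RTE_FLOW_ITEM_TYPE_END, },
        };

        return flow_dv_convert_encap_data(items, buf, size, error);
}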
4248
4249 static int
4250 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4251 {
4252         struct rte_ether_hdr *eth = NULL;
4253         struct rte_vlan_hdr *vlan = NULL;
4254         struct rte_ipv6_hdr *ipv6 = NULL;
4255         struct rte_udp_hdr *udp = NULL;
4256         char *next_hdr;
4257         uint16_t proto;
4258
4259         eth = (struct rte_ether_hdr *)data;
4260         next_hdr = (char *)(eth + 1);
4261         proto = RTE_BE16(eth->ether_type);
4262
4263         /* VLAN skipping */
4264         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4265                 vlan = (struct rte_vlan_hdr *)next_hdr;
4266                 proto = RTE_BE16(vlan->eth_proto);
4267                 next_hdr += sizeof(struct rte_vlan_hdr);
4268         }
4269
4270         /* HW calculates the IPv4 csum; no need to proceed. */
4271         if (proto == RTE_ETHER_TYPE_IPV4)
4272                 return 0;
4273
4274         /* Non-IPv4/IPv6 header, not supported. */
4275         if (proto != RTE_ETHER_TYPE_IPV6) {
4276                 return rte_flow_error_set(error, ENOTSUP,
4277                                           RTE_FLOW_ERROR_TYPE_ACTION,
4278                                           NULL, "Cannot offload non IPv4/IPv6");
4279         }
4280
4281         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4282
4283         /* Ignore non-UDP. */
4284         if (ipv6->proto != IPPROTO_UDP)
4285                 return 0;
4286
4287         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4288         udp->dgram_cksum = 0;
4289
4290         return 0;
4291 }
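
/*
 * Note (editorial): clearing the checksum leaves the pushed header with
 * an all-zero UDP checksum, which receivers tolerate for tunnels
 * (RFC 6935 explicitly permits it over IPv6); the IPv4 case is skipped
 * above because hardware computes the IPv4 header checksum.
 */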
4292
4293 /**
4294  * Convert L2 encap action to DV specification.
4295  *
4296  * @param[in] dev
4297  *   Pointer to rte_eth_dev structure.
4298  * @param[in] action
4299  *   Pointer to action structure.
4300  * @param[in, out] dev_flow
4301  *   Pointer to the mlx5_flow.
4302  * @param[in] transfer
4303  *   Mark if the flow is an E-Switch flow.
4304  * @param[out] error
4305  *   Pointer to the error structure.
4306  *
4307  * @return
4308  *   0 on success, a negative errno value otherwise and rte_errno is set.
4309  */
4310 static int
4311 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4312                                const struct rte_flow_action *action,
4313                                struct mlx5_flow *dev_flow,
4314                                uint8_t transfer,
4315                                struct rte_flow_error *error)
4316 {
4317         const struct rte_flow_item *encap_data;
4318         const struct rte_flow_action_raw_encap *raw_encap_data;
4319         struct mlx5_flow_dv_encap_decap_resource res = {
4320                 .reformat_type =
4321                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4322                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4323                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4324         };
4325
4326         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4327                 raw_encap_data =
4328                         (const struct rte_flow_action_raw_encap *)action->conf;
4329                 res.size = raw_encap_data->size;
4330                 memcpy(res.buf, raw_encap_data->data, res.size);
4331         } else {
4332                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4333                         encap_data =
4334                                 ((const struct rte_flow_action_vxlan_encap *)
4335                                                 action->conf)->definition;
4336                 else
4337                         encap_data =
4338                                 ((const struct rte_flow_action_nvgre_encap *)
4339                                                 action->conf)->definition;
4340                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4341                                                &res.size, error))
4342                         return -rte_errno;
4343         }
4344         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4345                 return -rte_errno;
4346         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4347                 return rte_flow_error_set(error, EINVAL,
4348                                           RTE_FLOW_ERROR_TYPE_ACTION,
4349                                           NULL, "can't create L2 encap action");
4350         return 0;
4351 }
4352
4353 /**
4354  * Convert L2 decap action to DV specification.
4355  *
4356  * @param[in] dev
4357  *   Pointer to rte_eth_dev structure.
4358  * @param[in, out] dev_flow
4359  *   Pointer to the mlx5_flow.
4360  * @param[in] transfer
4361  *   Mark if the flow is an E-Switch flow.
4362  * @param[out] error
4363  *   Pointer to the error structure.
4364  *
4365  * @return
4366  *   0 on success, a negative errno value otherwise and rte_errno is set.
4367  */
4368 static int
4369 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4370                                struct mlx5_flow *dev_flow,
4371                                uint8_t transfer,
4372                                struct rte_flow_error *error)
4373 {
4374         struct mlx5_flow_dv_encap_decap_resource res = {
4375                 .size = 0,
4376                 .reformat_type =
4377                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4378                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4379                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4380         };
4381
4382         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4383                 return rte_flow_error_set(error, EINVAL,
4384                                           RTE_FLOW_ERROR_TYPE_ACTION,
4385                                           NULL, "can't create L2 decap action");
4386         return 0;
4387 }
4388
4389 /**
4390  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4391  *
4392  * @param[in] dev
4393  *   Pointer to rte_eth_dev structure.
4394  * @param[in] action
4395  *   Pointer to action structure.
4396  * @param[in, out] dev_flow
4397  *   Pointer to the mlx5_flow.
4398  * @param[in] attr
4399  *   Pointer to the flow attributes.
4400  * @param[out] error
4401  *   Pointer to the error structure.
4402  *
4403  * @return
4404  *   0 on success, a negative errno value otherwise and rte_errno is set.
4405  */
4406 static int
4407 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4408                                 const struct rte_flow_action *action,
4409                                 struct mlx5_flow *dev_flow,
4410                                 const struct rte_flow_attr *attr,
4411                                 struct rte_flow_error *error)
4412 {
4413         const struct rte_flow_action_raw_encap *encap_data;
4414         struct mlx5_flow_dv_encap_decap_resource res;
4415
4416         memset(&res, 0, sizeof(res));
4417         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4418         res.size = encap_data->size;
4419         memcpy(res.buf, encap_data->data, res.size);
4420         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4421                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4422                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4423         if (attr->transfer)
4424                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4425         else
4426                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4427                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4428         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4429                 return rte_flow_error_set(error, EINVAL,
4430                                           RTE_FLOW_ERROR_TYPE_ACTION,
4431                                           NULL, "can't create encap action");
4432         return 0;
4433 }
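
/*
 * Note (editorial): the reformat type is inferred from the buffer size
 * alone: a buffer shorter than MLX5_ENCAPSULATION_DECISION_SIZE is
 * treated as an L3 decap (L3_TUNNEL_TO_L2), anything at least that big
 * as an L2-to-L3 tunnel encap.
 */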
4434
4435 /**
4436  * Create action push VLAN.
4437  *
4438  * @param[in] dev
4439  *   Pointer to rte_eth_dev structure.
4440  * @param[in] attr
4441  *   Pointer to the flow attributes.
4442  * @param[in] vlan
4443  *   Pointer to the vlan to push to the Ethernet header.
4444  * @param[in, out] dev_flow
4445  *   Pointer to the mlx5_flow.
4446  * @param[out] error
4447  *   Pointer to the error structure.
4448  *
4449  * @return
4450  *   0 on success, a negative errno value otherwise and rte_errno is set.
4451  */
4452 static int
4453 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4454                                 const struct rte_flow_attr *attr,
4455                                 const struct rte_vlan_hdr *vlan,
4456                                 struct mlx5_flow *dev_flow,
4457                                 struct rte_flow_error *error)
4458 {
4459         struct mlx5_flow_dv_push_vlan_action_resource res;
4460
4461         memset(&res, 0, sizeof(res));
4462         res.vlan_tag =
4463                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4464                                  vlan->vlan_tci);
4465         if (attr->transfer)
4466                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4467         else
4468                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4469                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4470         return flow_dv_push_vlan_action_resource_register
4471                                             (dev, &res, dev_flow, error);
4472 }
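
/*
 * Worked example (illustrative values): for TPID 0x8100, PCP 3 and
 * VID 100 the 16-bit TCI is (3 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 100 =
 * 0x6064, so the resource above carries
 * vlan_tag = rte_cpu_to_be_32(0x81006064).
 */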
4473
4474 /**
4475  * Validate the modify-header actions.
4476  *
4477  * @param[in] action_flags
4478  *   Holds the actions detected until now.
4479  * @param[in] action
4480  *   Pointer to the modify action.
4481  * @param[out] error
4482  *   Pointer to error structure.
4483  *
4484  * @return
4485  *   0 on success, a negative errno value otherwise and rte_errno is set.
4486  */
4487 static int
4488 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4489                                    const struct rte_flow_action *action,
4490                                    struct rte_flow_error *error)
4491 {
4492         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4493                 return rte_flow_error_set(error, EINVAL,
4494                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4495                                           NULL, "action configuration not set");
4496         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4497                 return rte_flow_error_set(error, EINVAL,
4498                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4499                                           "can't have encap action before"
4500                                           " modify action");
4501         return 0;
4502 }
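
/*
 * Ordering note (editorial, with hypothetical action names): because an
 * already-seen encap action rejects any later modify-header action, a
 * list such as "set_ipv4_src -> vxlan_encap -> queue" validates, while
 * "vxlan_encap -> set_ipv4_src -> queue" fails with EINVAL.
 */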
4503
4504 /**
4505  * Validate the modify-header MAC address actions.
4506  *
4507  * @param[in] action_flags
4508  *   Holds the actions detected until now.
4509  * @param[in] action
4510  *   Pointer to the modify action.
4511  * @param[in] item_flags
4512  *   Holds the items detected.
4513  * @param[out] error
4514  *   Pointer to error structure.
4515  *
4516  * @return
4517  *   0 on success, a negative errno value otherwise and rte_errno is set.
4518  */
4519 static int
4520 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4521                                    const struct rte_flow_action *action,
4522                                    const uint64_t item_flags,
4523                                    struct rte_flow_error *error)
4524 {
4525         int ret = 0;
4526
4527         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4528         if (!ret) {
4529                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4530                         return rte_flow_error_set(error, EINVAL,
4531                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4532                                                   NULL,
4533                                                   "no L2 item in pattern");
4534         }
4535         return ret;
4536 }
4537
4538 /**
4539  * Validate the modify-header IPv4 address actions.
4540  *
4541  * @param[in] action_flags
4542  *   Holds the actions detected until now.
4543  * @param[in] action
4544  *   Pointer to the modify action.
4545  * @param[in] item_flags
4546  *   Holds the items detected.
4547  * @param[out] error
4548  *   Pointer to error structure.
4549  *
4550  * @return
4551  *   0 on success, a negative errno value otherwise and rte_errno is set.
4552  */
4553 static int
4554 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4555                                     const struct rte_flow_action *action,
4556                                     const uint64_t item_flags,
4557                                     struct rte_flow_error *error)
4558 {
4559         int ret = 0;
4560         uint64_t layer;
4561
4562         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4563         if (!ret) {
4564                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4565                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4566                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4567                 if (!(item_flags & layer))
4568                         return rte_flow_error_set(error, EINVAL,
4569                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4570                                                   NULL,
4571                                                   "no ipv4 item in pattern");
4572         }
4573         return ret;
4574 }
4575
4576 /**
4577  * Validate the modify-header IPv6 address actions.
4578  *
4579  * @param[in] action_flags
4580  *   Holds the actions detected until now.
4581  * @param[in] action
4582  *   Pointer to the modify action.
4583  * @param[in] item_flags
4584  *   Holds the items detected.
4585  * @param[out] error
4586  *   Pointer to error structure.
4587  *
4588  * @return
4589  *   0 on success, a negative errno value otherwise and rte_errno is set.
4590  */
4591 static int
4592 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4593                                     const struct rte_flow_action *action,
4594                                     const uint64_t item_flags,
4595                                     struct rte_flow_error *error)
4596 {
4597         int ret = 0;
4598         uint64_t layer;
4599
4600         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4601         if (!ret) {
4602                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4603                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4604                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4605                 if (!(item_flags & layer))
4606                         return rte_flow_error_set(error, EINVAL,
4607                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4608                                                   NULL,
4609                                                   "no ipv6 item in pattern");
4610         }
4611         return ret;
4612 }
4613
4614 /**
4615  * Validate the modify-header TP actions.
4616  *
4617  * @param[in] action_flags
4618  *   Holds the actions detected until now.
4619  * @param[in] action
4620  *   Pointer to the modify action.
4621  * @param[in] item_flags
4622  *   Holds the items detected.
4623  * @param[out] error
4624  *   Pointer to error structure.
4625  *
4626  * @return
4627  *   0 on success, a negative errno value otherwise and rte_errno is set.
4628  */
4629 static int
4630 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4631                                   const struct rte_flow_action *action,
4632                                   const uint64_t item_flags,
4633                                   struct rte_flow_error *error)
4634 {
4635         int ret = 0;
4636         uint64_t layer;
4637
4638         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4639         if (!ret) {
4640                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4641                                  MLX5_FLOW_LAYER_INNER_L4 :
4642                                  MLX5_FLOW_LAYER_OUTER_L4;
4643                 if (!(item_flags & layer))
4644                         return rte_flow_error_set(error, EINVAL,
4645                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4646                                                   NULL, "no transport layer "
4647                                                   "in pattern");
4648         }
4649         return ret;
4650 }
4651
4652 /**
4653  * Validate the modify-header actions of increment/decrement
4654  * TCP Sequence-number.
4655  *
4656  * @param[in] action_flags
4657  *   Holds the actions detected until now.
4658  * @param[in] action
4659  *   Pointer to the modify action.
4660  * @param[in] item_flags
4661  *   Holds the items detected.
4662  * @param[out] error
4663  *   Pointer to error structure.
4664  *
4665  * @return
4666  *   0 on success, a negative errno value otherwise and rte_errno is set.
4667  */
4668 static int
4669 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4670                                        const struct rte_flow_action *action,
4671                                        const uint64_t item_flags,
4672                                        struct rte_flow_error *error)
4673 {
4674         int ret = 0;
4675         uint64_t layer;
4676
4677         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4678         if (!ret) {
4679                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4680                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4681                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4682                 if (!(item_flags & layer))
4683                         return rte_flow_error_set(error, EINVAL,
4684                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4685                                                   NULL, "no TCP item in"
4686                                                   " pattern");
4687                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4688                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4689                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4690                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4691                         return rte_flow_error_set(error, EINVAL,
4692                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4693                                                   NULL,
4694                                                   "cannot decrease and increase"
4695                                                   " TCP sequence number"
4696                                                   " at the same time");
4697         }
4698         return ret;
4699 }
4700
4701 /**
4702  * Validate the modify-header actions of increment/decrement
4703  * TCP Acknowledgment number.
4704  *
4705  * @param[in] action_flags
4706  *   Holds the actions detected until now.
4707  * @param[in] action
4708  *   Pointer to the modify action.
4709  * @param[in] item_flags
4710  *   Holds the items detected.
4711  * @param[out] error
4712  *   Pointer to error structure.
4713  *
4714  * @return
4715  *   0 on success, a negative errno value otherwise and rte_errno is set.
4716  */
4717 static int
4718 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4719                                        const struct rte_flow_action *action,
4720                                        const uint64_t item_flags,
4721                                        struct rte_flow_error *error)
4722 {
4723         int ret = 0;
4724         uint64_t layer;
4725
4726         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4727         if (!ret) {
4728                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4729                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4730                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4731                 if (!(item_flags & layer))
4732                         return rte_flow_error_set(error, EINVAL,
4733                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4734                                                   NULL, "no TCP item in"
4735                                                   " pattern");
4736                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4737                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4738                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4739                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4740                         return rte_flow_error_set(error, EINVAL,
4741                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4742                                                   NULL,
4743                                                   "cannot decrease and increase"
4744                                                   " TCP acknowledgment number"
4745                                                   " at the same time");
4746         }
4747         return ret;
4748 }
4749
4750 /**
4751  * Validate the modify-header TTL actions.
4752  *
4753  * @param[in] action_flags
4754  *   Holds the actions detected until now.
4755  * @param[in] action
4756  *   Pointer to the modify action.
4757  * @param[in] item_flags
4758  *   Holds the items detected.
4759  * @param[out] error
4760  *   Pointer to error structure.
4761  *
4762  * @return
4763  *   0 on success, a negative errno value otherwise and rte_errno is set.
4764  */
4765 static int
4766 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4767                                    const struct rte_flow_action *action,
4768                                    const uint64_t item_flags,
4769                                    struct rte_flow_error *error)
4770 {
4771         int ret = 0;
4772         uint64_t layer;
4773
4774         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4775         if (!ret) {
4776                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4777                                  MLX5_FLOW_LAYER_INNER_L3 :
4778                                  MLX5_FLOW_LAYER_OUTER_L3;
4779                 if (!(item_flags & layer))
4780                         return rte_flow_error_set(error, EINVAL,
4781                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4782                                                   NULL,
4783                                                   "no IP protocol in pattern");
4784         }
4785         return ret;
4786 }
4787
4788 /**
4789  * Validate the generic modify field actions.
 *
4790  * @param[in] dev
4791  *   Pointer to the rte_eth_dev structure.
4792  * @param[in] action_flags
4793  *   Holds the actions detected until now.
4794  * @param[in] action
4795  *   Pointer to the modify action.
4796  * @param[in] attr
4797  *   Pointer to the flow attributes.
4798  * @param[out] error
4799  *   Pointer to error structure.
4800  *
4801  * @return
4802  *   Number of header fields to modify (0 or more) on success,
4803  *   a negative errno value otherwise and rte_errno is set.
4804  */
4805 static int
4806 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4807                                    const uint64_t action_flags,
4808                                    const struct rte_flow_action *action,
4809                                    const struct rte_flow_attr *attr,
4810                                    struct rte_flow_error *error)
4811 {
4812         int ret = 0;
4813         struct mlx5_priv *priv = dev->data->dev_private;
4814         struct mlx5_dev_config *config = &priv->config;
4815         const struct rte_flow_action_modify_field *action_modify_field =
4816                 action->conf;
4817         uint32_t dst_width = mlx5_flow_item_field_width(dev,
4818                                 action_modify_field->dst.field,
4819                                 -1, attr, error);
4820         uint32_t src_width = mlx5_flow_item_field_width(dev,
4821                                 action_modify_field->src.field,
4822                                 dst_width, attr, error);
4823
4824         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4825         if (ret)
4826                 return ret;
4827
4828         if (action_modify_field->width == 0)
4829                 return rte_flow_error_set(error, EINVAL,
4830                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4831                                 "no bits are requested to be modified");
4832         else if (action_modify_field->width > dst_width ||
4833                  action_modify_field->width > src_width)
4834                 return rte_flow_error_set(error, EINVAL,
4835                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4836                                 "cannot modify more bits than"
4837                                 " the width of a field");
4838         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4839             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4840                 if ((action_modify_field->dst.offset +
4841                      action_modify_field->width > dst_width) ||
4842                     (action_modify_field->dst.offset % 32))
4843                         return rte_flow_error_set(error, EINVAL,
4844                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4845                                         "destination offset is too big"
4846                                         " or not aligned to 4 bytes");
4847                 if (action_modify_field->dst.level &&
4848                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4849                         return rte_flow_error_set(error, ENOTSUP,
4850                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4851                                         "inner header fields modification"
4852                                         " is not supported");
4853         }
4854         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4855             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4856                 if (!attr->transfer && !attr->group)
4857                         return rte_flow_error_set(error, ENOTSUP,
4858                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4859                                         "modify field action is not"
4860                                         " supported for group 0");
4861                 if ((action_modify_field->src.offset +
4862                      action_modify_field->width > src_width) ||
4863                     (action_modify_field->src.offset % 32))
4864                         return rte_flow_error_set(error, EINVAL,
4865                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4866                                         "source offset is too big"
4867                                         " or not aligned to 4 bytes");
4868                 if (action_modify_field->src.level &&
4869                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4870                         return rte_flow_error_set(error, ENOTSUP,
4871                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4872                                         "inner header fields modification"
4873                                         " is not supported");
4874         }
4875         if ((action_modify_field->dst.field ==
4876              action_modify_field->src.field) &&
4877             (action_modify_field->dst.level ==
4878              action_modify_field->src.level))
4879                 return rte_flow_error_set(error, EINVAL,
4880                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4881                                 "source and destination fields"
4882                                 " cannot be the same");
4883         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4884             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER ||
4885             action_modify_field->dst.field == RTE_FLOW_FIELD_MARK)
4886                 return rte_flow_error_set(error, EINVAL,
4887                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4888                                 "mark, immediate value or a pointer to it"
4889                                 " cannot be used as a destination");
4890         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4891             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4892                 return rte_flow_error_set(error, ENOTSUP,
4893                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4894                                 "modification of an arbitrary"
4895                                 " place in a packet is not supported");
4896         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4897             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4898                 return rte_flow_error_set(error, ENOTSUP,
4899                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4900                                 "modification of the 802.1Q Tag"
4901                                 " Identifier is not supported");
4902         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4903             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4904                 return rte_flow_error_set(error, ENOTSUP,
4905                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4906                                 "modification of the VXLAN Network"
4907                                 " Identifier is not supported");
4908         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4909             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4910                 return rte_flow_error_set(error, ENOTSUP,
4911                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4912                                 "modification of the GENEVE Network"
4913                                 " Identifier is not supported");
4914         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4915             action_modify_field->src.field == RTE_FLOW_FIELD_MARK)
4916                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4917                     !mlx5_flow_ext_mreg_supported(dev))
4918                         return rte_flow_error_set(error, ENOTSUP,
4919                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4920                                         "cannot modify mark in legacy mode"
4921                                         " or without extensive registers");
4922         if (action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4923             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4924                 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
4925                     !mlx5_flow_ext_mreg_supported(dev))
4926                         return rte_flow_error_set(error, ENOTSUP,
4927                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4928                                         "cannot modify meta without"
4929                                         " extensive registers support");
4930                 ret = flow_dv_get_metadata_reg(dev, attr, error);
4931                 if (ret < 0 || ret == REG_NON)
4932                         return rte_flow_error_set(error, ENOTSUP,
4933                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4934                                         "cannot modify meta without"
4935                                         " extensive registers available");
4936         }
4937         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4938                 return rte_flow_error_set(error, ENOTSUP,
4939                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4940                                 "add and sub operations"
4941                                 " are not supported");
4942         return (action_modify_field->width / 32) +
4943                !!(action_modify_field->width % 32);
4944 }
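
/*
 * Illustrative sketch, not part of the driver: the positive return value
 * above is the number of 32-bit modify-header commands the action consumes,
 * i.e. ceil(width / 32); a 48-bit copy costs (48 / 32) + !!(48 % 32) = 2.
 * The hypothetical helper below builds a MODIFY_FIELD action shaped to pass
 * the checks above (SET operation, non-zero width, 32-bit aligned offsets,
 * distinct fields); actual acceptance still depends on the device's
 * metadata register support and on a non-root group being used.
 */
static __rte_unused struct rte_flow_action
example_modify_field_action(void)
{
	/* Copy the 32 bits of META into TAG[0]. */
	static const struct rte_flow_action_modify_field conf = {
		.operation = RTE_FLOW_MODIFY_SET,
		.dst = {
			.field = RTE_FLOW_FIELD_TAG,
			.level = 0,	/* TAG array index. */
			.offset = 0,	/* Must be 32-bit aligned. */
		},
		.src = {
			.field = RTE_FLOW_FIELD_META,
			.offset = 0,
		},
		.width = 32,	/* (32 / 32) + !!(32 % 32) = 1 command. */
	};

	return (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
		.conf = &conf,
	};
}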
4945
4946 /**
4947  * Validate jump action.
4948  *
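 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload data, NULL when no tunnel is involved.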
4949  * @param[in] action
4950  *   Pointer to the jump action.
4951  * @param[in] action_flags
4952  *   Holds the actions detected until now.
4953  * @param[in] attributes
4954  *   Pointer to flow attributes
4955  * @param[in] external
4956  *   Action belongs to a flow rule created by a request external to the PMD.
4957  * @param[out] error
4958  *   Pointer to error structure.
4959  *
4960  * @return
4961  *   0 on success, a negative errno value otherwise and rte_errno is set.
4962  */
4963 static int
4964 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4965                              const struct mlx5_flow_tunnel *tunnel,
4966                              const struct rte_flow_action *action,
4967                              uint64_t action_flags,
4968                              const struct rte_flow_attr *attributes,
4969                              bool external, struct rte_flow_error *error)
4970 {
4971         uint32_t target_group, table = 0;
4972         int ret = 0;
4973         struct flow_grp_info grp_info = {
4974                 .external = !!external,
4975                 .transfer = !!attributes->transfer,
4976                 .fdb_def_rule = 1,
4977                 .std_tbl_fix = 0
4978         };
4979         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4980                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4981                 return rte_flow_error_set(error, EINVAL,
4982                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4983                                           "can't have 2 fate actions in"
4984                                           " the same flow");
4985         if (!action->conf)
4986                 return rte_flow_error_set(error, EINVAL,
4987                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4988                                           NULL, "action configuration not set");
4989         target_group =
4990                 ((const struct rte_flow_action_jump *)action->conf)->group;
4991         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4992                                        &grp_info, error);
4993         if (ret)
4994                 return ret;
4995         if (attributes->group == target_group &&
4996             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4997                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4998                 return rte_flow_error_set(error, EINVAL,
4999                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5000                                           "target group must be other than"
5001                                           " the current flow group");
5002         if (table == 0)
5003                 return rte_flow_error_set(error, EINVAL,
5004                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5005                                           NULL, "root table shouldn't be the destination");
5006         return 0;
5007 }
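
/*
 * Illustrative sketch, not part of the driver: a JUMP action that satisfies
 * the checks above names a group other than the rule's own group and must
 * not resolve to the root table. The helper below is hypothetical.
 */
static __rte_unused int
example_build_jump(uint32_t cur_group, struct rte_flow_action_jump *conf,
		   struct rte_flow_action *action)
{
	conf->group = cur_group + 1;	/* Never the current group. */
	if (conf->group == 0)
		return -EINVAL;		/* Root table cannot be a target. */
	action->type = RTE_FLOW_ACTION_TYPE_JUMP;
	action->conf = conf;
	return 0;
}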
5008
5009 /**
5010  * Validate action PORT_ID / REPRESENTED_PORT.
5011  *
5012  * @param[in] dev
5013  *   Pointer to rte_eth_dev structure.
5014  * @param[in] action_flags
5015  *   Bit-fields that holds the actions detected until now.
5016  * @param[in] action
5017  *   PORT_ID / REPRESENTED_PORT action structure.
5018  * @param[in] attr
5019  *   Attributes of flow that includes this action.
5020  * @param[out] error
5021  *   Pointer to error structure.
5022  *
5023  * @return
5024  *   0 on success, a negative errno value otherwise and rte_errno is set.
5025  */
5026 static int
5027 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5028                                 uint64_t action_flags,
5029                                 const struct rte_flow_action *action,
5030                                 const struct rte_flow_attr *attr,
5031                                 struct rte_flow_error *error)
5032 {
5033         const struct rte_flow_action_port_id *port_id;
5034         const struct rte_flow_action_ethdev *ethdev;
5035         struct mlx5_priv *act_priv;
5036         struct mlx5_priv *dev_priv;
5037         uint16_t port;
5038
5039         if (!attr->transfer)
5040                 return rte_flow_error_set(error, ENOTSUP,
5041                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5042                                           NULL,
5043                                           "port action is valid in transfer"
5044                                           " mode only");
5045         if (!action || !action->conf)
5046                 return rte_flow_error_set(error, ENOTSUP,
5047                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5048                                           NULL,
5049                                           "port action parameters must be"
5050                                           " specified");
5051         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5052                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5053                 return rte_flow_error_set(error, EINVAL,
5054                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5055                                           "can have only one fate action in"
5056                                           " a flow");
5057         dev_priv = mlx5_dev_to_eswitch_info(dev);
5058         if (!dev_priv)
5059                 return rte_flow_error_set(error, rte_errno,
5060                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5061                                           NULL,
5062                                           "failed to obtain E-Switch info");
5063         switch (action->type) {
5064         case RTE_FLOW_ACTION_TYPE_PORT_ID:
5065                 port_id = action->conf;
5066                 port = port_id->original ? dev->data->port_id : port_id->id;
5067                 break;
5068         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5069                 ethdev = action->conf;
5070                 port = ethdev->port_id;
5071                 break;
5072         default:
5073                 MLX5_ASSERT(false);
5074                 return rte_flow_error_set
5075                                 (error, EINVAL,
5076                                  RTE_FLOW_ERROR_TYPE_ACTION, action,
5077                                  "unknown E-Switch action");
5078         }
5079         act_priv = mlx5_port_to_eswitch_info(port, false);
5080         if (!act_priv)
5081                 return rte_flow_error_set
5082                                 (error, rte_errno,
5083                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, action->conf,
5084                                  "failed to obtain E-Switch port id for port");
5085         if (act_priv->domain_id != dev_priv->domain_id)
5086                 return rte_flow_error_set
5087                                 (error, EINVAL,
5088                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5089                                  "port does not belong to"
5090                                  " E-Switch being configured");
5091         return 0;
5092 }
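
/*
 * Illustrative sketch, not part of the driver: the two accepted action
 * types carry their target differently. PORT_ID can fall back to the
 * issuing port when "original" is set, while REPRESENTED_PORT always
 * names an ethdev port directly, mirroring the switch statement above.
 */
static __rte_unused uint16_t
example_port_action_target(const struct rte_flow_action *action,
			   uint16_t issuing_port)
{
	if (action->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
		const struct rte_flow_action_port_id *conf = action->conf;

		return conf->original ? issuing_port : conf->id;
	}
	/* RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT */
	return ((const struct rte_flow_action_ethdev *)action->conf)->port_id;
}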
5093
5094 /**
5095  * Get the maximum number of modify header actions.
5096  *
5097  * @param dev
5098  *   Pointer to rte_eth_dev structure.
5099  * @param root
5100  *   Whether action is on root table.
5101  *
5102  * @return
5103  *   Max number of modify header actions device can support.
5104  */
5105 static inline unsigned int
5106 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5107                               bool root)
5108 {
5109         /*
5110          * There's no way to directly query the max capacity from FW.
5111          * The maximal value on root table should be assumed to be supported.
5112          */
5113         if (!root)
5114                 return MLX5_MAX_MODIFY_NUM;
5115         else
5116                 return MLX5_ROOT_TBL_MODIFY_NUM;
5117 }
5118
5119 /**
5120  * Validate the meter action.
5121  *
5122  * @param[in] dev
5123  *   Pointer to rte_eth_dev structure.
5124  * @param[in] action_flags
5125  *   Bit-fields that holds the actions detected until now.
5126  * @param[in] item_flags
5127  *   Holds the items detected.
5128  * @param[in] action
5129  *   Pointer to the meter action.
5130  * @param[in] attr
5131  *   Attributes of flow that includes this action.
5132  * @param[in] port_id_item
5133  *   Pointer to item indicating port id.
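 * @param[out] def_policy
 *   Pointer to a flag set when the meter uses the default policy.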
5134  * @param[out] error
5135  *   Pointer to error structure.
5136  *
5137  * @return
5138  *   0 on success, a negative errno value otherwise and rte_errno is set.
5139  */
5140 static int
5141 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5142                                 uint64_t action_flags, uint64_t item_flags,
5143                                 const struct rte_flow_action *action,
5144                                 const struct rte_flow_attr *attr,
5145                                 const struct rte_flow_item *port_id_item,
5146                                 bool *def_policy,
5147                                 struct rte_flow_error *error)
5148 {
5149         struct mlx5_priv *priv = dev->data->dev_private;
5150         const struct rte_flow_action_meter *am = action->conf;
5151         struct mlx5_flow_meter_info *fm;
5152         struct mlx5_flow_meter_policy *mtr_policy;
5153         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5154
5155         if (!am)
5156                 return rte_flow_error_set(error, EINVAL,
5157                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5158                                           "meter action conf is NULL");
5159
5160         if (action_flags & MLX5_FLOW_ACTION_METER)
5161                 return rte_flow_error_set(error, ENOTSUP,
5162                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5163                                           "meter chaining not supported");
5164         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5165                 return rte_flow_error_set(error, ENOTSUP,
5166                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5167                                           "meter with jump not supported");
5168         if (!priv->mtr_en)
5169                 return rte_flow_error_set(error, ENOTSUP,
5170                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5171                                           NULL,
5172                                           "meter action not supported");
5173         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5174         if (!fm)
5175                 return rte_flow_error_set(error, EINVAL,
5176                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5177                                           "Meter not found");
5178         /* ASO meter can always be shared by different domains. */
5179         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5180             !(fm->transfer == attr->transfer ||
5181               (!fm->ingress && !attr->ingress && attr->egress) ||
5182               (!fm->egress && !attr->egress && attr->ingress)))
5183                 return rte_flow_error_set(error, EINVAL,
5184                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5185                         "Flow attributes domain is either invalid "
5186                         "or has a conflict with the current "
5187                         "meter attributes");
5188         if (fm->def_policy) {
5189                 if (!((attr->transfer &&
5190                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5191                         (attr->egress &&
5192                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5193                         (attr->ingress &&
5194                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5195                         return rte_flow_error_set(error, EINVAL,
5196                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5197                                           "Flow attributes domain "
5198                                           "has a conflict with the current "
5199                                           "meter domain attributes");
5200                 *def_policy = true;
5201         } else {
5202                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5203                                                 fm->policy_id, NULL);
5204                 if (!mtr_policy)
5205                         return rte_flow_error_set(error, EINVAL,
5206                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5207                                           "Invalid policy id for meter");
5208                 if (!((attr->transfer && mtr_policy->transfer) ||
5209                         (attr->egress && mtr_policy->egress) ||
5210                         (attr->ingress && mtr_policy->ingress)))
5211                         return rte_flow_error_set(error, EINVAL,
5212                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5213                                           "Flow attributes domain "
5214                                           "has a conflict with the current "
5215                                           "meter domain attributes");
5216                 if (attr->transfer && mtr_policy->dev) {
5217                         /*
5218                          * When the policy has a port_id fate action,
5219                          * the flow should have the same src port as the policy.
5220                          */
5221                         struct mlx5_priv *policy_port_priv =
5222                                         mtr_policy->dev->data->dev_private;
5223                         int32_t flow_src_port = priv->representor_id;
5224
5225                         if (port_id_item) {
5226                                 const struct rte_flow_item_port_id *spec =
5227                                                         port_id_item->spec;
5228                                 struct mlx5_priv *port_priv =
5229                                         mlx5_port_to_eswitch_info(spec->id,
5230                                                                   false);
5231                                 if (!port_priv)
5232                                         return rte_flow_error_set(error,
5233                                                 rte_errno,
5234                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5235                                                 spec,
5236                                                 "Failed to get port info.");
5237                                 flow_src_port = port_priv->representor_id;
5238                         }
5239                         if (flow_src_port != policy_port_priv->representor_id)
5240                                 return rte_flow_error_set(error,
5241                                                 rte_errno,
5242                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5243                                                 NULL,
5244                                                 "Flow and meter policy "
5245                                                 "have different src ports.");
5246                 } else if (mtr_policy->is_rss) {
5247                         struct mlx5_flow_meter_policy *fp;
5248                         struct mlx5_meter_policy_action_container *acg;
5249                         struct mlx5_meter_policy_action_container *acy;
5250                         const struct rte_flow_action *rss_act;
5251                         int ret;
5252
5253                         fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
5254                                                                 mtr_policy);
5255                         if (fp == NULL)
5256                                 return rte_flow_error_set(error, EINVAL,
5257                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5258                                                   "Unable to get the final "
5259                                                   "policy in the hierarchy");
5260                         acg = &fp->act_cnt[RTE_COLOR_GREEN];
5261                         acy = &fp->act_cnt[RTE_COLOR_YELLOW];
5262                         MLX5_ASSERT(acg->fate_action ==
5263                                     MLX5_FLOW_FATE_SHARED_RSS ||
5264                                     acy->fate_action ==
5265                                     MLX5_FLOW_FATE_SHARED_RSS);
5266                         if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS)
5267                                 rss_act = acg->rss;
5268                         else
5269                                 rss_act = acy->rss;
5270                         ret = mlx5_flow_validate_action_rss(rss_act,
5271                                         action_flags, dev, attr,
5272                                         item_flags, error);
5273                         if (ret)
5274                                 return ret;
5275                 }
5276                 *def_policy = false;
5277         }
5278         return 0;
5279 }
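
/*
 * Illustrative sketch, not part of the driver: a METER action able to pass
 * the checks above references a meter already created through the rte_mtr
 * API, with a domain (ingress/egress/transfer) matching the flow
 * attributes. The meter ID below is hypothetical.
 */
static __rte_unused struct rte_flow_action
example_meter_action(void)
{
	static const struct rte_flow_action_meter conf = {
		.mtr_id = 1,	/* Must name an existing meter. */
	};

	return (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_METER,
		.conf = &conf,
	};
}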
5280
5281 /**
5282  * Validate the age action.
5283  *
5284  * @param[in] action_flags
5285  *   Holds the actions detected until now.
5286  * @param[in] action
5287  *   Pointer to the age action.
5288  * @param[in] dev
5289  *   Pointer to the Ethernet device structure.
5290  * @param[out] error
5291  *   Pointer to error structure.
5292  *
5293  * @return
5294  *   0 on success, a negative errno value otherwise and rte_errno is set.
5295  */
5296 static int
5297 flow_dv_validate_action_age(uint64_t action_flags,
5298                             const struct rte_flow_action *action,
5299                             struct rte_eth_dev *dev,
5300                             struct rte_flow_error *error)
5301 {
5302         struct mlx5_priv *priv = dev->data->dev_private;
5303         const struct rte_flow_action_age *age = action->conf;
5304
5305         if (!priv->sh->devx || (priv->sh->cmng.counter_fallback &&
5306             !priv->sh->aso_age_mng))
5307                 return rte_flow_error_set(error, ENOTSUP,
5308                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5309                                           NULL,
5310                                           "age action not supported");
5311         if (!(action->conf))
5312                 return rte_flow_error_set(error, EINVAL,
5313                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5314                                           "configuration cannot be null");
5315         if (!(age->timeout))
5316                 return rte_flow_error_set(error, EINVAL,
5317                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5318                                           "invalid timeout value 0");
5319         if (action_flags & MLX5_FLOW_ACTION_AGE)
5320                 return rte_flow_error_set(error, EINVAL,
5321                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5322                                           "duplicate age actions set");
5323         return 0;
5324 }
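
/*
 * Illustrative sketch, not part of the driver: an AGE action accepted by
 * the checks above carries a non-zero timeout (in seconds) and, optionally,
 * a user context later returned by rte_flow_get_aged_flows() once the flow
 * ages out. The helper below is hypothetical.
 */
static __rte_unused struct rte_flow_action
example_age_action(void *user_ctx)
{
	static struct rte_flow_action_age conf = {
		.timeout = 10,	/* Seconds; zero is rejected above. */
	};

	conf.context = user_ctx;
	return (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_AGE,
		.conf = &conf,
	};
}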
5325
5326 /**
5327  * Validate the modify-header IPv4 DSCP actions.
5328  *
5329  * @param[in] action_flags
5330  *   Holds the actions detected until now.
5331  * @param[in] action
5332  *   Pointer to the modify action.
5333  * @param[in] item_flags
5334  *   Holds the items detected.
5335  * @param[out] error
5336  *   Pointer to error structure.
5337  *
5338  * @return
5339  *   0 on success, a negative errno value otherwise and rte_errno is set.
5340  */
5341 static int
5342 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5343                                          const struct rte_flow_action *action,
5344                                          const uint64_t item_flags,
5345                                          struct rte_flow_error *error)
5346 {
5347         int ret = 0;
5348
5349         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5350         if (!ret) {
5351                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5352                         return rte_flow_error_set(error, EINVAL,
5353                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5354                                                   NULL,
5355                                                   "no ipv4 item in pattern");
5356         }
5357         return ret;
5358 }
5359
5360 /**
5361  * Validate the modify-header IPv6 DSCP actions.
5362  *
5363  * @param[in] action_flags
5364  *   Holds the actions detected until now.
5365  * @param[in] action
5366  *   Pointer to the modify action.
5367  * @param[in] item_flags
5368  *   Holds the items detected.
5369  * @param[out] error
5370  *   Pointer to error structure.
5371  *
5372  * @return
5373  *   0 on success, a negative errno value otherwise and rte_errno is set.
5374  */
5375 static int
5376 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5377                                          const struct rte_flow_action *action,
5378                                          const uint64_t item_flags,
5379                                          struct rte_flow_error *error)
5380 {
5381         int ret = 0;
5382
5383         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5384         if (!ret) {
5385                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5386                         return rte_flow_error_set(error, EINVAL,
5387                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5388                                                   NULL,
5389                                                   "no ipv6 item in pattern");
5390         }
5391         return ret;
5392 }
5393
5394 int
5395 flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
5396                         struct mlx5_list_entry *entry, void *cb_ctx)
5397 {
5398         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5399         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5400         struct mlx5_flow_dv_modify_hdr_resource *resource =
5401                                   container_of(entry, typeof(*resource), entry);
5402         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5403
5404         key_len += ref->actions_num * sizeof(ref->actions[0]);
5405         return ref->actions_num != resource->actions_num ||
5406                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5407 }
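
/*
 * Illustrative sketch, not part of the driver: the cache key compared
 * above is the tail of the resource starting at "ft_type" plus the
 * variable-length command array, so two entries match only when the
 * command counts agree and every byte of that span is equal.
 */
static __rte_unused size_t
example_modify_hdr_key_len(const struct mlx5_flow_dv_modify_hdr_resource *res)
{
	return sizeof(*res) - offsetof(typeof(*res), ft_type) +
	       res->actions_num * sizeof(res->actions[0]);
}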
5408
5409 static struct mlx5_indexed_pool *
5410 flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
5411 {
5412         struct mlx5_indexed_pool *ipool = __atomic_load_n
5413                                      (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
5414
5415         if (!ipool) {
5416                 struct mlx5_indexed_pool *expected = NULL;
5417                 struct mlx5_indexed_pool_config cfg =
5418                     (struct mlx5_indexed_pool_config) {
5419                        .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
5420                                                                    (index + 1) *
5421                                            sizeof(struct mlx5_modification_cmd),
5422                        .trunk_size = 64,
5423                        .grow_trunk = 3,
5424                        .grow_shift = 2,
5425                        .need_lock = 1,
5426                        .release_mem_en = !!sh->reclaim_mode,
5427                        .per_core_cache = sh->reclaim_mode ? 0 : (1 << 16),
5428                        .malloc = mlx5_malloc,
5429                        .free = mlx5_free,
5430                        .type = "mlx5_modify_action_resource",
5431                 };
5432
5433                 cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
5434                 ipool = mlx5_ipool_create(&cfg);
5435                 if (!ipool)
5436                         return NULL;
5437                 if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
5438                                                  &expected, ipool, false,
5439                                                  __ATOMIC_SEQ_CST,
5440                                                  __ATOMIC_SEQ_CST)) {
5441                         mlx5_ipool_destroy(ipool);
5442                         ipool = __atomic_load_n(&sh->mdh_ipools[index],
5443                                                 __ATOMIC_SEQ_CST);
5444                 }
5445         }
5446         return ipool;
5447 }
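
/*
 * Illustrative sketch, not part of the driver: the lookup above follows a
 * lock-free publish pattern -- allocate optimistically, install with a
 * compare-and-swap, and discard the local copy when another thread won the
 * race. The same idea in miniature, with hypothetical callbacks:
 */
static __rte_unused void *
example_cas_publish(void **slot, void *(*alloc_cb)(void),
		    void (*free_cb)(void *))
{
	void *obj = __atomic_load_n(slot, __ATOMIC_SEQ_CST);
	void *expected = NULL;

	if (obj)
		return obj;
	obj = alloc_cb();
	if (!obj)
		return NULL;
	if (!__atomic_compare_exchange_n(slot, &expected, obj, false,
					 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
		free_cb(obj);	/* Lost the race; take the winner's object. */
		obj = __atomic_load_n(slot, __ATOMIC_SEQ_CST);
	}
	return obj;
}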
5448
5449 struct mlx5_list_entry *
5450 flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
5451 {
5452         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5453         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5454         struct mlx5dv_dr_domain *ns;
5455         struct mlx5_flow_dv_modify_hdr_resource *entry;
5456         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5457         struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
5458                                                           ref->actions_num - 1);
5459         int ret;
5460         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5461         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5462         uint32_t idx;
5463
5464         if (unlikely(!ipool)) {
5465                 rte_flow_error_set(ctx->error, ENOMEM,
5466                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5467                                    NULL, "cannot allocate modify ipool");
5468                 return NULL;
5469         }
5470         entry = mlx5_ipool_zmalloc(ipool, &idx);
5471         if (!entry) {
5472                 rte_flow_error_set(ctx->error, ENOMEM,
5473                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5474                                    "cannot allocate resource memory");
5475                 return NULL;
5476         }
5477         rte_memcpy(&entry->ft_type,
5478                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5479                    key_len + data_len);
5480         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5481                 ns = sh->fdb_domain;
5482         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5483                 ns = sh->tx_domain;
5484         else
5485                 ns = sh->rx_domain;
5486         ret = mlx5_flow_os_create_flow_action_modify_header
5487                                         (sh->cdev->ctx, ns, entry,
5488                                          data_len, &entry->action);
5489         if (ret) {
5490                 mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
5491                 rte_flow_error_set(ctx->error, ENOMEM,
5492                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5493                                    NULL, "cannot create modification action");
5494                 return NULL;
5495         }
5496         entry->idx = idx;
5497         return &entry->entry;
5498 }
5499
5500 struct mlx5_list_entry *
5501 flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5502                         void *cb_ctx)
5503 {
5504         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5505         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5506         struct mlx5_flow_dv_modify_hdr_resource *entry;
5507         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5508         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5509         uint32_t idx;
5510
5511         entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
5512                                   &idx);
5513         if (!entry) {
5514                 rte_flow_error_set(ctx->error, ENOMEM,
5515                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5516                                    "cannot allocate resource memory");
5517                 return NULL;
5518         }
5519         memcpy(entry, oentry, sizeof(*entry) + data_len);
5520         entry->idx = idx;
5521         return &entry->entry;
5522 }
5523
5524 void
5525 flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5526 {
5527         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5528         struct mlx5_flow_dv_modify_hdr_resource *res =
5529                 container_of(entry, typeof(*res), entry);
5530
5531         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
5532 }
5533
5534 /**
5535  * Validate the sample action.
5536  *
5537  * @param[in, out] action_flags
5538  *   Holds the actions detected until now.
5539  * @param[in] action
5540  *   Pointer to the sample action.
5541  * @param[in] dev
5542  *   Pointer to the Ethernet device structure.
5543  * @param[in] attr
5544  *   Attributes of flow that includes this action.
5545  * @param[in] item_flags
5546  *   Holds the items detected.
5547  * @param[in] rss
5548  *   Pointer to the RSS action.
5549  * @param[out] sample_rss
5550  *   Pointer to the RSS action in sample action list.
5551  * @param[out] count
5552  *   Pointer to the COUNT action in sample action list.
5553  * @param[out] fdb_mirror_limit
5554  *   Pointer to the FDB mirror limitation flag.
5555  * @param[out] error
5556  *   Pointer to error structure.
5557  *
5558  * @return
5559  *   0 on success, a negative errno value otherwise and rte_errno is set.
5560  */
5561 static int
5562 flow_dv_validate_action_sample(uint64_t *action_flags,
5563                                const struct rte_flow_action *action,
5564                                struct rte_eth_dev *dev,
5565                                const struct rte_flow_attr *attr,
5566                                uint64_t item_flags,
5567                                const struct rte_flow_action_rss *rss,
5568                                const struct rte_flow_action_rss **sample_rss,
5569                                const struct rte_flow_action_count **count,
5570                                int *fdb_mirror_limit,
5571                                struct rte_flow_error *error)
5572 {
5573         struct mlx5_priv *priv = dev->data->dev_private;
5574         struct mlx5_dev_config *dev_conf = &priv->config;
5575         const struct rte_flow_action_sample *sample = action->conf;
5576         const struct rte_flow_action *act;
5577         uint64_t sub_action_flags = 0;
5578         uint16_t queue_index = 0xFFFF;
5579         int actions_n = 0;
5580         int ret;
5581
5582         if (!sample)
5583                 return rte_flow_error_set(error, EINVAL,
5584                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5585                                           "configuration cannot be NULL");
5586         if (sample->ratio == 0)
5587                 return rte_flow_error_set(error, EINVAL,
5588                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5589                                           "ratio value starts from 1");
5590         if (!priv->sh->devx || (sample->ratio > 0 && !priv->sampler_en))
5591                 return rte_flow_error_set(error, ENOTSUP,
5592                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5593                                           NULL,
5594                                           "sample action not supported");
5595         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5596                 return rte_flow_error_set(error, EINVAL,
5597                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5598                                           "Multiple sample actions not "
5599                                           "supported");
5600         if (*action_flags & MLX5_FLOW_ACTION_METER)
5601                 return rte_flow_error_set(error, EINVAL,
5602                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5603                                           "wrong action order, meter should "
5604                                           "be after sample action");
5605         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5606                 return rte_flow_error_set(error, EINVAL,
5607                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5608                                           "wrong action order, jump should "
5609                                           "be after sample action");
5610         if (*action_flags & MLX5_FLOW_ACTION_CT)
5611                 return rte_flow_error_set(error, EINVAL,
5612                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5613                                           "Sample after CT not supported");
5614         act = sample->actions;
5615         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5616                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5617                         return rte_flow_error_set(error, ENOTSUP,
5618                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5619                                                   act, "too many actions");
5620                 switch (act->type) {
5621                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5622                         ret = mlx5_flow_validate_action_queue(act,
5623                                                               sub_action_flags,
5624                                                               dev,
5625                                                               attr, error);
5626                         if (ret < 0)
5627                                 return ret;
5628                         queue_index = ((const struct rte_flow_action_queue *)
5629                                                         (act->conf))->index;
5630                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5631                         ++actions_n;
5632                         break;
5633                 case RTE_FLOW_ACTION_TYPE_RSS:
5634                         *sample_rss = act->conf;
5635                         ret = mlx5_flow_validate_action_rss(act,
5636                                                             sub_action_flags,
5637                                                             dev, attr,
5638                                                             item_flags,
5639                                                             error);
5640                         if (ret < 0)
5641                                 return ret;
5642                         if (rss && *sample_rss &&
5643                             ((*sample_rss)->level != rss->level ||
5644                             (*sample_rss)->types != rss->types))
5645                                 return rte_flow_error_set(error, ENOTSUP,
5646                                         RTE_FLOW_ERROR_TYPE_ACTION,
5647                                         NULL,
5648                                         "Can't use different RSS types "
5649                                         "or levels in the same flow");
5650                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5651                                 queue_index = (*sample_rss)->queue[0];
5652                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5653                         ++actions_n;
5654                         break;
5655                 case RTE_FLOW_ACTION_TYPE_MARK:
5656                         ret = flow_dv_validate_action_mark(dev, act,
5657                                                            sub_action_flags,
5658                                                            attr, error);
5659                         if (ret < 0)
5660                                 return ret;
5661                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5662                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5663                                                 MLX5_FLOW_ACTION_MARK_EXT;
5664                         else
5665                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5666                         ++actions_n;
5667                         break;
5668                 case RTE_FLOW_ACTION_TYPE_COUNT:
5669                         ret = flow_dv_validate_action_count
5670                                 (dev, false, *action_flags | sub_action_flags,
5671                                  error);
5672                         if (ret < 0)
5673                                 return ret;
5674                         *count = act->conf;
5675                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5676                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5677                         ++actions_n;
5678                         break;
5679                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5680                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5681                         ret = flow_dv_validate_action_port_id(dev,
5682                                                               sub_action_flags,
5683                                                               act,
5684                                                               attr,
5685                                                               error);
5686                         if (ret)
5687                                 return ret;
5688                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5689                         ++actions_n;
5690                         break;
5691                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5692                         ret = flow_dv_validate_action_raw_encap_decap
5693                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5694                                  &actions_n, action, item_flags, error);
5695                         if (ret < 0)
5696                                 return ret;
5697                         ++actions_n;
5698                         break;
5699                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5700                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5701                         ret = flow_dv_validate_action_l2_encap(dev,
5702                                                                sub_action_flags,
5703                                                                act, attr,
5704                                                                error);
5705                         if (ret < 0)
5706                                 return ret;
5707                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5708                         ++actions_n;
5709                         break;
5710                 default:
5711                         return rte_flow_error_set(error, ENOTSUP,
5712                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5713                                                   NULL,
5714                                                   "unsupported optional "
5715                                                   "action in sample list");
5716                 }
5717         }
5718         if (attr->ingress && !attr->transfer) {
5719                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5720                                           MLX5_FLOW_ACTION_RSS)))
5721                         return rte_flow_error_set(error, EINVAL,
5722                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5723                                                   NULL,
5724                                                   "Ingress must have a dest "
5725                                                   "QUEUE for Sample");
5726         } else if (attr->egress && !attr->transfer) {
5727                 return rte_flow_error_set(error, ENOTSUP,
5728                                           RTE_FLOW_ERROR_TYPE_ACTION,
5729                                           NULL,
5730                                           "Sample only supports Ingress "
5731                                           "or E-Switch");
5732         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5733                 MLX5_ASSERT(attr->transfer);
5734                 if (sample->ratio > 1)
5735                         return rte_flow_error_set(error, ENOTSUP,
5736                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5737                                                   NULL,
5738                                                   "E-Switch doesn't support "
5739                                                   "any optional action "
5740                                                   "for sampling");
5741                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5742                         return rte_flow_error_set(error, ENOTSUP,
5743                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5744                                                   NULL,
5745                                                   "unsupported action QUEUE");
5746                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5747                         return rte_flow_error_set(error, ENOTSUP,
5748                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5749                                                   NULL,
5750                                                   "unsupported action RSS");
5751                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5752                         return rte_flow_error_set(error, EINVAL,
5753                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5754                                                   NULL,
5755                                                   "E-Switch must have a dest "
5756                                                   "port for mirroring");
5757                 if (!priv->config.hca_attr.reg_c_preserve &&
5758                      priv->representor_id != UINT16_MAX)
5759                         *fdb_mirror_limit = 1;
5760         }
5761         /* Continue validation for Xcap actions.*/
5762         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5763             (queue_index == 0xFFFF ||
5764              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5765                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5766                      MLX5_FLOW_XCAP_ACTIONS)
5767                         return rte_flow_error_set(error, ENOTSUP,
5768                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5769                                                   NULL, "encap and decap "
5770                                                   "combination isn't "
5771                                                   "supported");
5772                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5773                                                         MLX5_FLOW_ACTION_ENCAP))
5774                         return rte_flow_error_set(error, ENOTSUP,
5775                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5776                                                   NULL, "encap is not supported"
5777                                                   " for ingress traffic");
5778         }
5779         return 0;
5780 }
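
/*
 * Illustrative sketch, not part of the driver: per the rules above, an
 * E-Switch (transfer) mirror uses ratio == 1 with a port destination in
 * the sample action list, while a NIC-Rx sampling rule needs a QUEUE or
 * RSS fate instead. The helper below is hypothetical.
 */
static __rte_unused const struct rte_flow_action_sample *
example_fdb_mirror_conf(uint16_t dst_port)
{
	static struct rte_flow_action_ethdev port_conf;
	static const struct rte_flow_action sub_actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
			.conf = &port_conf,
		},
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	static const struct rte_flow_action_sample conf = {
		.ratio = 1,	/* 1 == mirror every packet. */
		.actions = sub_actions,
	};

	port_conf.port_id = dst_port;
	return &conf;
}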
5781
5782 /**
5783  * Find existing modify-header resource or create and register a new one.
5784  *
5785  * @param[in, out] dev
5786  *   Pointer to rte_eth_dev structure.
5787  * @param[in, out] resource
5788  *   Pointer to modify-header resource.
5789  * @param[in, out] dev_flow
5790  *   Pointer to the dev_flow.
5791  * @param[out] error
5792  *   Pointer to error structure.
5793  *
5794  * @return
5795  *   0 on success, otherwise a negative errno value and rte_errno is set.
5796  */
5797 static int
5798 flow_dv_modify_hdr_resource_register
5799                         (struct rte_eth_dev *dev,
5800                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5801                          struct mlx5_flow *dev_flow,
5802                          struct rte_flow_error *error)
5803 {
5804         struct mlx5_priv *priv = dev->data->dev_private;
5805         struct mlx5_dev_ctx_shared *sh = priv->sh;
5806         uint32_t key_len = sizeof(*resource) -
5807                            offsetof(typeof(*resource), ft_type) +
5808                            resource->actions_num * sizeof(resource->actions[0]);
5809         struct mlx5_list_entry *entry;
5810         struct mlx5_flow_cb_ctx ctx = {
5811                 .error = error,
5812                 .data = resource,
5813         };
5814         struct mlx5_hlist *modify_cmds;
5815         uint64_t key64;
5816
5817         modify_cmds = flow_dv_hlist_prepare(sh, &sh->modify_cmds,
5818                                 "hdr_modify",
5819                                 MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
5820                                 true, false, sh,
5821                                 flow_dv_modify_create_cb,
5822                                 flow_dv_modify_match_cb,
5823                                 flow_dv_modify_remove_cb,
5824                                 flow_dv_modify_clone_cb,
5825                                 flow_dv_modify_clone_free_cb);
5826         if (unlikely(!modify_cmds))
5827                 return -rte_errno;
5828         resource->root = !dev_flow->dv.group;
5829         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5830                                                                 resource->root))
5831                 return rte_flow_error_set(error, EOVERFLOW,
5832                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5833                                           "too many modify header items");
5834         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5835         entry = mlx5_hlist_register(modify_cmds, key64, &ctx);
5836         if (!entry)
5837                 return -rte_errno;
5838         resource = container_of(entry, typeof(*resource), entry);
5839         dev_flow->handle->dvh.modify_hdr = resource;
5840         return 0;
5841 }
5842
5843 /**
5844  * Get DV flow counter by index.
5845  *
5846  * @param[in] dev
5847  *   Pointer to the Ethernet device structure.
5848  * @param[in] idx
5849  *   mlx5 flow counter index in the container.
5850  * @param[out] ppool
5851  *   mlx5 flow counter pool in the container.
5852  *
5853  * @return
5854  *   Pointer to the counter, NULL otherwise.
5855  */
5856 static struct mlx5_flow_counter *
5857 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5858                            uint32_t idx,
5859                            struct mlx5_flow_counter_pool **ppool)
5860 {
5861         struct mlx5_priv *priv = dev->data->dev_private;
5862         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5863         struct mlx5_flow_counter_pool *pool;
5864
5865         /* Decrease to original index and clear shared bit. */
5866         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5867         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5868         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5869         MLX5_ASSERT(pool);
5870         if (ppool)
5871                 *ppool = pool;
5872         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5873 }
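
/*
 * Illustrative sketch, not part of the driver: counter indices are 1-based
 * and may carry a shared-counter flag; once that is cleared, the quotient
 * selects the pool and the remainder the slot. With 512 counters per pool,
 * for instance, index 1537 decodes to pool 3, slot 0.
 */
static __rte_unused void
example_decode_counter_idx(uint32_t idx, uint32_t *pool_no, uint32_t *slot)
{
	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	*pool_no = idx / MLX5_COUNTERS_PER_POOL;
	*slot = idx % MLX5_COUNTERS_PER_POOL;
}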
5874
5875 /**
5876  * Check the devx counter belongs to the pool.
5877  *
5878  * @param[in] pool
5879  *   Pointer to the counter pool.
5880  * @param[in] id
5881  *   The counter devx ID.
5882  *
5883  * @return
5884  *   True if counter belongs to the pool, false otherwise.
5885  */
5886 static bool
5887 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5888 {
5889         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5890                    MLX5_COUNTERS_PER_POOL;
5891
5892         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5893                 return true;
5894         return false;
5895 }
5896
5897 /**
5898  * Get a pool by devx counter ID.
5899  *
5900  * @param[in] cmng
5901  *   Pointer to the counter management.
5902  * @param[in] id
5903  *   The counter devx ID.
5904  *
5905  * @return
5906  *   The counter pool pointer if it exists, NULL otherwise.
5907  */
5908 static struct mlx5_flow_counter_pool *
5909 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5910 {
5911         uint32_t i;
5912         struct mlx5_flow_counter_pool *pool = NULL;
5913
5914         rte_spinlock_lock(&cmng->pool_update_sl);
5915         /* Check last used pool. */
5916         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5917             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5918                 pool = cmng->pools[cmng->last_pool_idx];
5919                 goto out;
5920         }
5921         /* ID out of range means no suitable pool in the container. */
5922         if (id > cmng->max_id || id < cmng->min_id)
5923                 goto out;
5924         /*
5925          * Search the container from the end, since counter IDs are mostly
5926          * increasing in sequence, so the last pool is usually the needed
5927          * one.
5928          */
5929         i = cmng->n_valid;
5930         while (i--) {
5931                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5932
5933                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5934                         pool = pool_tmp;
5935                         break;
5936                 }
5937         }
5938 out:
5939         rte_spinlock_unlock(&cmng->pool_update_sl);
5940         return pool;
5941 }
5942
5943 /**
5944  * Resize a counter container.
5945  *
5946  * @param[in] dev
5947  *   Pointer to the Ethernet device structure.
5948  *
5949  * @return
5950  *   0 on success, otherwise negative errno value and rte_errno is set.
5951  */
5952 static int
5953 flow_dv_container_resize(struct rte_eth_dev *dev)
5954 {
5955         struct mlx5_priv *priv = dev->data->dev_private;
5956         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5957         void *old_pools = cmng->pools;
5958         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5959         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5960         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5961
5962         if (!pools) {
5963                 rte_errno = ENOMEM;
5964                 return -ENOMEM;
5965         }
5966         if (old_pools)
5967                 memcpy(pools, old_pools, cmng->n *
5968                                        sizeof(struct mlx5_flow_counter_pool *));
5969         cmng->n = resize;
5970         cmng->pools = pools;
5971         if (old_pools)
5972                 mlx5_free(old_pools);
5973         return 0;
5974 }
5975
5976 /**
5977  * Query a devx flow counter.
5978  *
5979  * @param[in] dev
5980  *   Pointer to the Ethernet device structure.
5981  * @param[in] counter
5982  *   Index to the flow counter.
5983  * @param[out] pkts
5984  *   The statistics value of packets.
5985  * @param[out] bytes
5986  *   The statistics value of bytes.
5987  *
5988  * @return
5989  *   0 on success, otherwise a negative errno value and rte_errno is set.
5990  */
5991 static inline int
5992 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5993                      uint64_t *bytes)
5994 {
5995         struct mlx5_priv *priv = dev->data->dev_private;
5996         struct mlx5_flow_counter_pool *pool = NULL;
5997         struct mlx5_flow_counter *cnt;
5998         int offset;
5999
6000         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6001         MLX5_ASSERT(pool);
6002         if (priv->sh->cmng.counter_fallback)
6003                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
6004                                         0, pkts, bytes, 0, NULL, NULL, 0);
6005         rte_spinlock_lock(&pool->sl);
6006         if (!pool->raw) {
6007                 *pkts = 0;
6008                 *bytes = 0;
6009         } else {
6010                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
6011                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
6012                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
6013         }
6014         rte_spinlock_unlock(&pool->sl);
6015         return 0;
6016 }
6017
6018 /**
6019  * Create and initialize a new counter pool.
6020  *
6021  * @param[in] dev
6022  *   Pointer to the Ethernet device structure.
6023  * @param[in] dcs
6024  *   The devX counter handle.
6025  * @param[in] age
6026  *   Whether the pool is for counters allocated for aging.
6027  *   When the new pool does not fit into the container, the container is
6028  *   resized internally and its pools array pointer is updated.
6029  *
6030  * @return
6031  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
6032  */
6033 static struct mlx5_flow_counter_pool *
6034 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
6035                     uint32_t age)
6036 {
6037         struct mlx5_priv *priv = dev->data->dev_private;
6038         struct mlx5_flow_counter_pool *pool;
6039         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6040         bool fallback = priv->sh->cmng.counter_fallback;
6041         uint32_t size = sizeof(*pool);
6042
6043         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
6044         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
6045         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
6046         if (!pool) {
6047                 rte_errno = ENOMEM;
6048                 return NULL;
6049         }
6050         pool->raw = NULL;
6051         pool->is_aged = !!age;
6052         pool->query_gen = 0;
6053         pool->min_dcs = dcs;
6054         rte_spinlock_init(&pool->sl);
6055         rte_spinlock_init(&pool->csl);
6056         TAILQ_INIT(&pool->counters[0]);
6057         TAILQ_INIT(&pool->counters[1]);
6058         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
6059         rte_spinlock_lock(&cmng->pool_update_sl);
6060         pool->index = cmng->n_valid;
6061         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
6062                 mlx5_free(pool);
6063                 rte_spinlock_unlock(&cmng->pool_update_sl);
6064                 return NULL;
6065         }
6066         cmng->pools[pool->index] = pool;
6067         cmng->n_valid++;
6068         if (unlikely(fallback)) {
6069                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
6070
6071                 if (base < cmng->min_id)
6072                         cmng->min_id = base;
6073                 if (base > cmng->max_id)
6074                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
6075                 cmng->last_pool_idx = pool->index;
6076         }
6077         rte_spinlock_unlock(&cmng->pool_update_sl);
6078         return pool;
6079 }
6080
6081 /**
6082  * Prepare a new counter and/or a new counter pool.
6083  *
6084  * @param[in] dev
6085  *   Pointer to the Ethernet device structure.
6086  * @param[out] cnt_free
6087  *   Where to put the pointer of a new counter.
6088  * @param[in] age
6089  *   Whether the pool is for counters allocated for aging.
6090  *
6091  * @return
6092  *   The counter pool pointer and @p cnt_free is set on success,
6093  *   NULL otherwise and rte_errno is set.
6094  */
6095 static struct mlx5_flow_counter_pool *
6096 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
6097                              struct mlx5_flow_counter **cnt_free,
6098                              uint32_t age)
6099 {
6100         struct mlx5_priv *priv = dev->data->dev_private;
6101         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6102         struct mlx5_flow_counter_pool *pool;
6103         struct mlx5_counters tmp_tq;
6104         struct mlx5_devx_obj *dcs = NULL;
6105         struct mlx5_flow_counter *cnt;
6106         enum mlx5_counter_type cnt_type =
6107                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6108         bool fallback = priv->sh->cmng.counter_fallback;
6109         uint32_t i;
6110
6111         if (fallback) {
6112                 /* bulk_bitmap must be 0 for single counter allocation. */
6113                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0);
6114                 if (!dcs)
6115                         return NULL;
6116                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
6117                 if (!pool) {
6118                         pool = flow_dv_pool_create(dev, dcs, age);
6119                         if (!pool) {
6120                                 mlx5_devx_cmd_destroy(dcs);
6121                                 return NULL;
6122                         }
6123                 }
6124                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
6125                 cnt = MLX5_POOL_GET_CNT(pool, i);
6126                 cnt->pool = pool;
6127                 cnt->dcs_when_free = dcs;
6128                 *cnt_free = cnt;
6129                 return pool;
6130         }
6131         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
6132         if (!dcs) {
6133                 rte_errno = ENODATA;
6134                 return NULL;
6135         }
6136         pool = flow_dv_pool_create(dev, dcs, age);
6137         if (!pool) {
6138                 mlx5_devx_cmd_destroy(dcs);
6139                 return NULL;
6140         }
6141         TAILQ_INIT(&tmp_tq);
6142         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
6143                 cnt = MLX5_POOL_GET_CNT(pool, i);
6144                 cnt->pool = pool;
6145                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
6146         }
6147         rte_spinlock_lock(&cmng->csl[cnt_type]);
6148         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
6149         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6150         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
6151         (*cnt_free)->pool = pool;
6152         return pool;
6153 }
6154
6155 /**
6156  * Allocate a flow counter.
6157  *
6158  * @param[in] dev
6159  *   Pointer to the Ethernet device structure.
6160  * @param[in] age
6161  *   Whether the counter was allocated for aging.
6162  *
6163  * @return
6164  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6165  */
6166 static uint32_t
6167 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
6168 {
6169         struct mlx5_priv *priv = dev->data->dev_private;
6170         struct mlx5_flow_counter_pool *pool = NULL;
6171         struct mlx5_flow_counter *cnt_free = NULL;
6172         bool fallback = priv->sh->cmng.counter_fallback;
6173         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6174         enum mlx5_counter_type cnt_type =
6175                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6176         uint32_t cnt_idx;
6177
6178         if (!priv->sh->devx) {
6179                 rte_errno = ENOTSUP;
6180                 return 0;
6181         }
6182         /* Get free counters from container. */
6183         rte_spinlock_lock(&cmng->csl[cnt_type]);
6184         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
6185         if (cnt_free)
6186                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
6187         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6188         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
6189                 goto err;
6190         pool = cnt_free->pool;
6191         if (fallback)
6192                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
6193         /* Create a DV counter action only on first-time usage. */
6194         if (!cnt_free->action) {
6195                 uint16_t offset;
6196                 struct mlx5_devx_obj *dcs;
6197                 int ret;
6198
6199                 if (!fallback) {
6200                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
6201                         dcs = pool->min_dcs;
6202                 } else {
6203                         offset = 0;
6204                         dcs = cnt_free->dcs_when_free;
6205                 }
6206                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
6207                                                             &cnt_free->action);
6208                 if (ret) {
6209                         rte_errno = errno;
6210                         goto err;
6211                 }
6212         }
6213         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
6214                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
6215         /* Update the counter reset values. */
6216         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
6217                                  &cnt_free->bytes))
6218                 goto err;
6219         if (!fallback && !priv->sh->cmng.query_thread_on)
6220                 /* Start the asynchronous batch query by the host thread. */
6221                 mlx5_set_query_alarm(priv->sh);
6222         /*
6223          * When the count action is not shared (by ID), the shared_info
6224          * field is used for the indirect action API's refcnt.
6225          * When the counter action is shared neither by ID nor by the
6226          * indirect action API, the refcnt must be 1.
6227          */
6228         cnt_free->shared_info.refcnt = 1;
6229         return cnt_idx;
6230 err:
6231         if (cnt_free) {
6232                 cnt_free->pool = pool;
6233                 if (fallback)
6234                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
6235                 rte_spinlock_lock(&cmng->csl[cnt_type]);
6236                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
6237                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6238         }
6239         return 0;
6240 }
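
/*
 * A minimal decoding sketch (not part of the driver), assuming the
 * MLX5_MAKE_CNT_IDX() encoding used above of
 * pool_index * MLX5_COUNTERS_PER_POOL + offset + 1; the helper name
 * example_decode_cnt_idx() is hypothetical.
 */
static void __rte_unused
example_decode_cnt_idx(uint32_t cnt_idx, uint32_t *pool_idx, uint32_t *offset)
{
        /* Undo the "+ 1" bias, then split into pool index and offset. */
        *pool_idx = (cnt_idx - 1) / MLX5_COUNTERS_PER_POOL;
        *offset = (cnt_idx - 1) % MLX5_COUNTERS_PER_POOL;
}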
6241
6242 /**
6243  * Get age param from counter index.
6244  *
6245  * @param[in] dev
6246  *   Pointer to the Ethernet device structure.
6247  * @param[in] counter
6248  *   Index to the counter handler.
6249  *
6250  * @return
6251  *   The aging parameter specified for the counter index.
6252  */
6253 static struct mlx5_age_param*
6254 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6255                                 uint32_t counter)
6256 {
6257         struct mlx5_flow_counter *cnt;
6258         struct mlx5_flow_counter_pool *pool = NULL;
6259
6260         flow_dv_counter_get_by_idx(dev, counter, &pool);
6261         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6262         cnt = MLX5_POOL_GET_CNT(pool, counter);
6263         return MLX5_CNT_TO_AGE(cnt);
6264 }
6265
6266 /**
6267  * Remove a flow counter from aged counter list.
6268  *
6269  * @param[in] dev
6270  *   Pointer to the Ethernet device structure.
6271  * @param[in] counter
6272  *   Index to the counter handler.
6273  * @param[in] cnt
6274  *   Pointer to the counter handler.
6275  */
6276 static void
6277 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6278                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6279 {
6280         struct mlx5_age_info *age_info;
6281         struct mlx5_age_param *age_param;
6282         struct mlx5_priv *priv = dev->data->dev_private;
6283         uint16_t expected = AGE_CANDIDATE;
6284
6285         age_info = GET_PORT_AGE_INFO(priv);
6286         age_param = flow_dv_counter_idx_get_age(dev, counter);
6287         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6288                                          AGE_FREE, false, __ATOMIC_RELAXED,
6289                                          __ATOMIC_RELAXED)) {
6290                 /*
6291                  * We need the lock even on age timeout, since the
6292                  * counter may still be in use by the aging process.
6293                  */
6294                 rte_spinlock_lock(&age_info->aged_sl);
6295                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6296                 rte_spinlock_unlock(&age_info->aged_sl);
6297                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6298         }
6299 }
6300
6301 /**
6302  * Release a flow counter.
6303  *
6304  * @param[in] dev
6305  *   Pointer to the Ethernet device structure.
6306  * @param[in] counter
6307  *   Index to the counter handler.
6308  */
6309 static void
6310 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6311 {
6312         struct mlx5_priv *priv = dev->data->dev_private;
6313         struct mlx5_flow_counter_pool *pool = NULL;
6314         struct mlx5_flow_counter *cnt;
6315         enum mlx5_counter_type cnt_type;
6316
6317         if (!counter)
6318                 return;
6319         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6320         MLX5_ASSERT(pool);
6321         if (pool->is_aged) {
6322                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6323         } else {
6324                 /*
6325                  * If the counter action is shared by the indirect action
6326                  * API, the atomic operation decrements its reference
6327                  * counter; if the action is still referenced after the
6328                  * decrement, the function returns here without releasing it.
6329                  * When the counter action is not shared by the indirect
6330                  * action API, the refcnt is 1 before the decrement, so the
6331                  * condition fails and the function does not return here.
6332                  */
6333                 if (__atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6334                                        __ATOMIC_RELAXED))
6335                         return;
6336         }
6337         cnt->pool = pool;
6338         /*
6339          * Put the counter back to the list to be updated in non-fallback
6340          * mode. Two lists are used alternately: while one is being queried,
6341          * freed counters are added to the other one, selected by the pool
6342          * query_gen value. After the query finishes, that list is appended
6343          * to the global container counter list. The lists switch roles when
6344          * a query starts, so no extra locking is needed: the query callback
6345          * and the release function each operate on a different list.
6346          */
6347         if (!priv->sh->cmng.counter_fallback) {
6348                 rte_spinlock_lock(&pool->csl);
6349                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6350                 rte_spinlock_unlock(&pool->csl);
6351         } else {
6352                 cnt->dcs_when_free = cnt->dcs_when_active;
6353                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6354                                            MLX5_COUNTER_TYPE_ORIGIN;
6355                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6356                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6357                                   cnt, next);
6358                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6359         }
6360 }
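
/*
 * A minimal lifecycle sketch (not part of the driver): allocate a counter
 * not bound to aging, read its statistics, and release it. The helper name
 * example_counter_roundtrip() is hypothetical; the called functions are the
 * ones defined above.
 */
static int __rte_unused
example_counter_roundtrip(struct rte_eth_dev *dev)
{
        uint64_t pkts = 0, bytes = 0;
        uint32_t cnt_idx = flow_dv_counter_alloc(dev, 0);
        int ret;

        if (!cnt_idx)
                return -rte_errno;
        /* The index could now be attached to a flow rule COUNT action. */
        ret = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
        flow_dv_counter_free(dev, cnt_idx);
        return ret;
}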
6361
6362 /**
6363  * Resize a meter ID container.
6364  *
6365  * @param[in] dev
6366  *   Pointer to the Ethernet device structure.
6367  *
6368  * @return
6369  *   0 on success, otherwise negative errno value and rte_errno is set.
6370  */
6371 static int
6372 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6373 {
6374         struct mlx5_priv *priv = dev->data->dev_private;
6375         struct mlx5_aso_mtr_pools_mng *pools_mng =
6376                                 &priv->sh->mtrmng->pools_mng;
6377         void *old_pools = pools_mng->pools;
6378         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6379         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6380         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6381
6382         if (!pools) {
6383                 rte_errno = ENOMEM;
6384                 return -ENOMEM;
6385         }
6386         if (!pools_mng->n)
6387                 if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6388                         mlx5_free(pools);
6389                         return -ENOMEM;
6390                 }
6391         if (old_pools)
6392                 memcpy(pools, old_pools, pools_mng->n *
6393                                        sizeof(struct mlx5_aso_mtr_pool *));
6394         pools_mng->n = resize;
6395         pools_mng->pools = pools;
6396         if (old_pools)
6397                 mlx5_free(old_pools);
6398         return 0;
6399 }
6400
6401 /**
6402  * Prepare a new meter and/or a new meter pool.
6403  *
6404  * @param[in] dev
6405  *   Pointer to the Ethernet device structure.
6406  * @param[out] mtr_free
6407  *   Where to put the pointer of a new meter.
6408  *
6409  * @return
6410  *   The meter pool pointer and @p mtr_free is set on success,
6411  *   NULL otherwise and rte_errno is set.
6412  */
6413 static struct mlx5_aso_mtr_pool *
6414 flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
6415 {
6416         struct mlx5_priv *priv = dev->data->dev_private;
6417         struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
6418         struct mlx5_aso_mtr_pool *pool = NULL;
6419         struct mlx5_devx_obj *dcs = NULL;
6420         uint32_t i;
6421         uint32_t log_obj_size;
6422
6423         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6424         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
6425                                                       priv->sh->cdev->pdn,
6426                                                       log_obj_size);
6427         if (!dcs) {
6428                 rte_errno = ENODATA;
6429                 return NULL;
6430         }
6431         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6432         if (!pool) {
6433                 rte_errno = ENOMEM;
6434                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6435                 return NULL;
6436         }
6437         pool->devx_obj = dcs;
6438         rte_rwlock_write_lock(&pools_mng->resize_mtrwl);
6439         pool->index = pools_mng->n_valid;
6440         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6441                 mlx5_free(pool);
6442                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6443                 rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6444                 return NULL;
6445         }
6446         pools_mng->pools[pool->index] = pool;
6447         pools_mng->n_valid++;
6448         rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6449         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6450                 pool->mtrs[i].offset = i;
6451                 LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
6452         }
6453         pool->mtrs[0].offset = 0;
6454         *mtr_free = &pool->mtrs[0];
6455         return pool;
6456 }
6457
6458 /**
6459  * Release a flow meter back into the pool.
6460  *
6461  * @param[in] dev
6462  *   Pointer to the Ethernet device structure.
6463  * @param[in] mtr_idx
6464  *   Index to the ASO flow meter.
6465  */
6466 static void
6467 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6468 {
6469         struct mlx5_priv *priv = dev->data->dev_private;
6470         struct mlx5_aso_mtr_pools_mng *pools_mng =
6471                                 &priv->sh->mtrmng->pools_mng;
6472         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6473
6474         MLX5_ASSERT(aso_mtr);
6475         rte_spinlock_lock(&pools_mng->mtrsl);
6476         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6477         aso_mtr->state = ASO_METER_FREE;
6478         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6479         rte_spinlock_unlock(&pools_mng->mtrsl);
6480 }
6481
6482 /**
6483  * Allocate an ASO flow meter.
6484  *
6485  * @param[in] dev
6486  *   Pointer to the Ethernet device structure.
6487  *
6488  * @return
6489  *   Index to the ASO flow meter on success, 0 otherwise and rte_errno is set.
6490  */
6491 static uint32_t
6492 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6493 {
6494         struct mlx5_priv *priv = dev->data->dev_private;
6495         struct mlx5_aso_mtr *mtr_free = NULL;
6496         struct mlx5_aso_mtr_pools_mng *pools_mng =
6497                                 &priv->sh->mtrmng->pools_mng;
6498         struct mlx5_aso_mtr_pool *pool;
6499         uint32_t mtr_idx = 0;
6500
6501         if (!priv->sh->devx) {
6502                 rte_errno = ENOTSUP;
6503                 return 0;
6504         }
6505         /* Get a free meter from the management list; */
6506         /* when none is available, a new pool is created below. */
6507         rte_spinlock_lock(&pools_mng->mtrsl);
6508         mtr_free = LIST_FIRST(&pools_mng->meters);
6509         if (mtr_free)
6510                 LIST_REMOVE(mtr_free, next);
6511         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6512                 rte_spinlock_unlock(&pools_mng->mtrsl);
6513                 return 0;
6514         }
6515         mtr_free->state = ASO_METER_WAIT;
6516         rte_spinlock_unlock(&pools_mng->mtrsl);
6517         pool = container_of(mtr_free,
6518                         struct mlx5_aso_mtr_pool,
6519                         mtrs[mtr_free->offset]);
6520         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6521         if (!mtr_free->fm.meter_action) {
6522 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6523                 struct rte_flow_error error;
6524                 uint8_t reg_id;
6525
6526                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6527                 mtr_free->fm.meter_action =
6528                         mlx5_glue->dv_create_flow_action_aso
6529                                                 (priv->sh->rx_domain,
6530                                                  pool->devx_obj->obj,
6531                                                  mtr_free->offset,
6532                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6533                                                  reg_id - REG_C_0);
6534 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6535                 if (!mtr_free->fm.meter_action) {
6536                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6537                         return 0;
6538                 }
6539         }
6540         return mtr_idx;
6541 }
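
/*
 * A minimal lifecycle sketch (not part of the driver): allocate an ASO
 * meter index and return it to the pool. The helper name
 * example_mtr_roundtrip() is hypothetical; real callers keep the index
 * inside a meter object.
 */
static void __rte_unused
example_mtr_roundtrip(struct rte_eth_dev *dev)
{
        uint32_t mtr_idx = flow_dv_mtr_alloc(dev);

        if (!mtr_idx)
                return;
        /* The meter object could be fetched via mlx5_aso_meter_by_idx(). */
        flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
}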
6542
6543 /**
6544  * Verify that the @p attributes will be correctly understood by the NIC
6545  * and are valid for the current device configuration.
6546  *
6547  * @param[in] dev
6548  *   Pointer to dev struct.
6549  * @param[in] attributes
6550  *   Pointer to flow attributes.
6551  * @param[in] grp_info
6552  *   Pointer to the flow group translation information.
6553  * @param[out] error
6554  *   Pointer to error structure.
6555  *
6556  * @return
6557  *   - 0 on success and non-root table.
6558  *   - 1 on success and root table.
6559  *   - a negative errno value otherwise and rte_errno is set.
6560  */
6561 static int
6562 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6563                             const struct mlx5_flow_tunnel *tunnel,
6564                             const struct rte_flow_attr *attributes,
6565                             const struct flow_grp_info *grp_info,
6566                             struct rte_flow_error *error)
6567 {
6568         struct mlx5_priv *priv = dev->data->dev_private;
6569         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6570         int ret = 0;
6571
6572 #ifndef HAVE_MLX5DV_DR
6573         RTE_SET_USED(tunnel);
6574         RTE_SET_USED(grp_info);
6575         if (attributes->group)
6576                 return rte_flow_error_set(error, ENOTSUP,
6577                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6578                                           NULL,
6579                                           "groups are not supported");
6580 #else
6581         uint32_t table = 0;
6582
6583         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6584                                        grp_info, error);
6585         if (ret)
6586                 return ret;
6587         if (!table)
6588                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6589 #endif
6590         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6591             attributes->priority > lowest_priority)
6592                 return rte_flow_error_set(error, ENOTSUP,
6593                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6594                                           NULL,
6595                                           "priority out of range");
6596         if (attributes->transfer) {
6597                 if (!priv->config.dv_esw_en)
6598                         return rte_flow_error_set
6599                                 (error, ENOTSUP,
6600                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6601                                  "E-Switch dr is not supported");
6602                 if (!(priv->representor || priv->master))
6603                         return rte_flow_error_set
6604                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6605                                  NULL, "E-Switch configuration can only be"
6606                                  " done by a master or a representor device");
6607                 if (attributes->egress)
6608                         return rte_flow_error_set
6609                                 (error, ENOTSUP,
6610                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6611                                  "egress is not supported");
6612         }
6613         if (!(attributes->egress ^ attributes->ingress))
6614                 return rte_flow_error_set(error, ENOTSUP,
6615                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6616                                           "must specify exactly one of "
6617                                           "ingress or egress");
6618         return ret;
6619 }
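
/*
 * A minimal attribute sketch (not part of the driver): attributes accepted
 * by the checks above on a port without E-Switch offload. Exactly one of
 * ingress/egress must be set, and a non-zero group selects a non-root table
 * in the default translation.
 */
static const struct rte_flow_attr example_valid_attr __rte_unused = {
        .group = 1,     /* non-root table in the default translation */
        .priority = 0,  /* always within the supported range */
        .ingress = 1,   /* exactly one of ingress/egress */
        .transfer = 0,  /* transfer needs dv_esw_en and master/representor */
};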
6620
6621 static int
6622 validate_integrity_bits(const struct rte_flow_item_integrity *mask,
6623                         int64_t pattern_flags, uint64_t l3_flags,
6624                         uint64_t l4_flags, uint64_t ip4_flag,
6625                         struct rte_flow_error *error)
6626 {
6627         if (mask->l3_ok && !(pattern_flags & l3_flags))
6628                 return rte_flow_error_set(error, EINVAL,
6629                                           RTE_FLOW_ERROR_TYPE_ITEM,
6630                                           NULL, "missing L3 protocol");
6631
6632         if (mask->ipv4_csum_ok && !(pattern_flags & ip4_flag))
6633                 return rte_flow_error_set(error, EINVAL,
6634                                           RTE_FLOW_ERROR_TYPE_ITEM,
6635                                           NULL, "missing IPv4 protocol");
6636
6637         if ((mask->l4_ok || mask->l4_csum_ok) && !(pattern_flags & l4_flags))
6638                 return rte_flow_error_set(error, EINVAL,
6639                                           RTE_FLOW_ERROR_TYPE_ITEM,
6640                                           NULL, "missing L4 protocol");
6641
6642         return 0;
6643 }
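
/*
 * For instance (a sketch, not driver code): with a pattern of only
 * eth / ipv4 items, an integrity mask that sets l4_ok fails above with
 * "missing L4 protocol", since pattern_flags carries no TCP/UDP layer bit.
 */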
6644
6645 static int
6646 flow_dv_validate_item_integrity_post(const struct
6647                                      rte_flow_item *integrity_items[2],
6648                                      int64_t pattern_flags,
6649                                      struct rte_flow_error *error)
6650 {
6651         const struct rte_flow_item_integrity *mask;
6652         int ret;
6653
6654         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
6655                 mask = (typeof(mask))integrity_items[0]->mask;
6656                 ret = validate_integrity_bits(mask, pattern_flags,
6657                                               MLX5_FLOW_LAYER_OUTER_L3,
6658                                               MLX5_FLOW_LAYER_OUTER_L4,
6659                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4,
6660                                               error);
6661                 if (ret)
6662                         return ret;
6663         }
6664         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
6665                 mask = (typeof(mask))integrity_items[1]->mask;
6666                 ret = validate_integrity_bits(mask, pattern_flags,
6667                                               MLX5_FLOW_LAYER_INNER_L3,
6668                                               MLX5_FLOW_LAYER_INNER_L4,
6669                                               MLX5_FLOW_LAYER_INNER_L3_IPV4,
6670                                               error);
6671                 if (ret)
6672                         return ret;
6673         }
6674         return 0;
6675 }
6676
6677 static int
6678 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6679                                 const struct rte_flow_item *integrity_item,
6680                                 uint64_t pattern_flags, uint64_t *last_item,
6681                                 const struct rte_flow_item *integrity_items[2],
6682                                 struct rte_flow_error *error)
6683 {
6684         struct mlx5_priv *priv = dev->data->dev_private;
6685         const struct rte_flow_item_integrity *mask = (typeof(mask))
6686                                                      integrity_item->mask;
6687         const struct rte_flow_item_integrity *spec = (typeof(spec))
6688                                                      integrity_item->spec;
6689
6690         if (!priv->config.hca_attr.pkt_integrity_match)
6691                 return rte_flow_error_set(error, ENOTSUP,
6692                                           RTE_FLOW_ERROR_TYPE_ITEM,
6693                                           integrity_item,
6694                                           "packet integrity item not supported");
6695         if (!spec)
6696                 return rte_flow_error_set(error, ENOTSUP,
6697                                           RTE_FLOW_ERROR_TYPE_ITEM,
6698                                           integrity_item,
6699                                           "no spec for integrity item");
6700         if (!mask)
6701                 mask = &rte_flow_item_integrity_mask;
6702         if (!mlx5_validate_integrity_item(mask))
6703                 return rte_flow_error_set(error, ENOTSUP,
6704                                           RTE_FLOW_ERROR_TYPE_ITEM,
6705                                           integrity_item,
6706                                           "unsupported integrity filter");
6707         if (spec->level > 1) {
6708                 if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY)
6709                         return rte_flow_error_set
6710                                 (error, ENOTSUP,
6711                                  RTE_FLOW_ERROR_TYPE_ITEM,
6712                                  NULL, "multiple inner integrity items not supported");
6713                 integrity_items[1] = integrity_item;
6714                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
6715         } else {
6716                 if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY)
6717                         return rte_flow_error_set
6718                                 (error, ENOTSUP,
6719                                  RTE_FLOW_ERROR_TYPE_ITEM,
6720                                  NULL, "multiple outer integrity items not supported");
6721                 integrity_items[0] = integrity_item;
6722                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
6723         }
6724         return 0;
6725 }
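
/*
 * A minimal spec sketch (not part of the driver): an outer integrity item
 * (level <= 1) requiring valid L3/L4 and a good IPv4 checksum. It passes
 * the checks above only when IPv4 and TCP/UDP items are also present in
 * the pattern.
 */
static const struct rte_flow_item_integrity example_integrity_spec
                __rte_unused = {
        .level = 0,             /* outer packet headers */
        .l3_ok = 1,
        .l4_ok = 1,
        .ipv4_csum_ok = 1,
};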
6726
6727 static int
6728 flow_dv_validate_item_flex(struct rte_eth_dev *dev,
6729                            const struct rte_flow_item *item,
6730                            uint64_t item_flags,
6731                            uint64_t *last_item,
6732                            bool is_inner,
6733                            struct rte_flow_error *error)
6734 {
6735         const struct rte_flow_item_flex *flow_spec = item->spec;
6736         const struct rte_flow_item_flex *flow_mask = item->mask;
6737         struct mlx5_flex_item *flex;
6738
6739         if (!flow_spec)
6740                 return rte_flow_error_set(error, EINVAL,
6741                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6742                                           "flex flow item spec cannot be NULL");
6743         if (!flow_mask)
6744                 return rte_flow_error_set(error, EINVAL,
6745                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6746                                           "flex flow item mask cannot be NULL");
6747         if (item->last)
6748                 return rte_flow_error_set(error, ENOTSUP,
6749                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6750                                           "flex flow item last not supported");
6751         if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
6752                 return rte_flow_error_set(error, EINVAL,
6753                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6754                                           "invalid flex flow item handle");
6755         flex = (struct mlx5_flex_item *)flow_spec->handle;
6756         switch (flex->tunnel_mode) {
6757         case FLEX_TUNNEL_MODE_SINGLE:
6758                 if (item_flags &
6759                     (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
6760                         return rte_flow_error_set(error, EINVAL,
6761                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6762                                                   NULL, "multiple flex items not supported");
6763                 break;
6764         case FLEX_TUNNEL_MODE_OUTER:
6765                 if (is_inner)
6766                         return rte_flow_error_set(error, EINVAL,
6767                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6768                                                   NULL, "inner flex item was not configured");
6769                 if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
6770                         return rte_flow_error_set(error, ENOTSUP,
6771                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6772                                                   NULL, "multiple flex items not supported");
6773                 break;
6774         case FLEX_TUNNEL_MODE_INNER:
6775                 if (!is_inner)
6776                         return rte_flow_error_set(error, EINVAL,
6777                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6778                                                   NULL, "outer flex item was not configured");
6779                 if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
6780                         return rte_flow_error_set(error, EINVAL,
6781                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6782                                                   NULL, "multiple flex items not supported");
6783                 break;
6784         case FLEX_TUNNEL_MODE_MULTI:
6785                 if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
6786                     (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX))) {
6787                         return rte_flow_error_set(error, EINVAL,
6788                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6789                                                   NULL, "multiple flex items not supported");
6790                 }
6791                 break;
6792         case FLEX_TUNNEL_MODE_TUNNEL:
6793                 if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
6794                         return rte_flow_error_set(error, EINVAL,
6795                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6796                                                   NULL, "multiple flex tunnel items not supported");
6797                 break;
6798         default:
6799                 return rte_flow_error_set(error, EINVAL,
6800                                           RTE_FLOW_ERROR_TYPE_ITEM,
6801                                           NULL, "invalid flex item configuration");
6802         }
6803         *last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
6804                      MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
6805                      MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
6806         return 0;
6807 }
6808
6809 /**
6810  * Internal validation function. For validating both actions and items.
6811  *
6812  * @param[in] dev
6813  *   Pointer to the rte_eth_dev structure.
6814  * @param[in] attr
6815  *   Pointer to the flow attributes.
6816  * @param[in] items
6817  *   Pointer to the list of items.
6818  * @param[in] actions
6819  *   Pointer to the list of actions.
6820  * @param[in] external
6821  *   Set when this flow rule is created by a request external to the PMD.
6822  * @param[in] hairpin
6823  *   Number of hairpin TX actions, 0 means classic flow.
6824  * @param[out] error
6825  *   Pointer to the error structure.
6826  *
6827  * @return
6828  *   0 on success, a negative errno value otherwise and rte_errno is set.
6829  */
6830 static int
6831 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6832                  const struct rte_flow_item items[],
6833                  const struct rte_flow_action actions[],
6834                  bool external, int hairpin, struct rte_flow_error *error)
6835 {
6836         int ret;
6837         uint64_t action_flags = 0;
6838         uint64_t item_flags = 0;
6839         uint64_t last_item = 0;
6840         uint8_t next_protocol = 0xff;
6841         uint16_t ether_type = 0;
6842         int actions_n = 0;
6843         uint8_t item_ipv6_proto = 0;
6844         int fdb_mirror_limit = 0;
6845         int modify_after_mirror = 0;
6846         const struct rte_flow_item *geneve_item = NULL;
6847         const struct rte_flow_item *gre_item = NULL;
6848         const struct rte_flow_item *gtp_item = NULL;
6849         const struct rte_flow_action_raw_decap *decap;
6850         const struct rte_flow_action_raw_encap *encap;
6851         const struct rte_flow_action_rss *rss = NULL;
6852         const struct rte_flow_action_rss *sample_rss = NULL;
6853         const struct rte_flow_action_count *sample_count = NULL;
6854         const struct rte_flow_item_tcp nic_tcp_mask = {
6855                 .hdr = {
6856                         .tcp_flags = 0xFF,
6857                         .src_port = RTE_BE16(UINT16_MAX),
6858                         .dst_port = RTE_BE16(UINT16_MAX),
6859                 }
6860         };
6861         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6862                 .hdr = {
6863                         .src_addr =
6864                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6865                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6866                         .dst_addr =
6867                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6868                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6869                         .vtc_flow = RTE_BE32(0xffffffff),
6870                         .proto = 0xff,
6871                         .hop_limits = 0xff,
6872                 },
6873                 .has_frag_ext = 1,
6874         };
6875         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6876                 .hdr = {
6877                         .common = {
6878                                 .u32 =
6879                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6880                                         .type = 0xFF,
6881                                         }).u32),
6882                         },
6883                         .dummy[0] = 0xffffffff,
6884                 },
6885         };
6886         struct mlx5_priv *priv = dev->data->dev_private;
6887         struct mlx5_dev_config *dev_conf = &priv->config;
6888         uint16_t queue_index = 0xFFFF;
6889         const struct rte_flow_item_vlan *vlan_m = NULL;
6890         uint32_t rw_act_num = 0;
6891         uint64_t is_root;
6892         const struct mlx5_flow_tunnel *tunnel;
6893         enum mlx5_tof_rule_type tof_rule_type;
6894         struct flow_grp_info grp_info = {
6895                 .external = !!external,
6896                 .transfer = !!attr->transfer,
6897                 .fdb_def_rule = !!priv->fdb_def_rule,
6898                 .std_tbl_fix = true,
6899         };
6900         const struct rte_eth_hairpin_conf *conf;
6901         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
6902         const struct rte_flow_item *port_id_item = NULL;
6903         bool def_policy = false;
6904         uint16_t udp_dport = 0;
6905
6906         if (items == NULL)
6907                 return -1;
6908         tunnel = is_tunnel_offload_active(dev) ?
6909                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6910         if (tunnel) {
6911                 if (!priv->config.dv_flow_en)
6912                         return rte_flow_error_set
6913                                 (error, ENOTSUP,
6914                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6915                                  NULL, "tunnel offload requires DV flow interface");
6916                 if (priv->representor)
6917                         return rte_flow_error_set
6918                                 (error, ENOTSUP,
6919                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6920                                  NULL, "decap not supported for VF representor");
6921                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6922                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6923                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6924                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6925                                         MLX5_FLOW_ACTION_DECAP;
6926                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6927                                         (dev, attr, tunnel, tof_rule_type);
6928         }
6929         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6930         if (ret < 0)
6931                 return ret;
6932         is_root = (uint64_t)ret;
6933         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6934                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6935                 int type = items->type;
6936
6937                 if (!mlx5_flow_os_item_supported(type))
6938                         return rte_flow_error_set(error, ENOTSUP,
6939                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6940                                                   NULL, "item not supported");
6941                 switch (type) {
6942                 case RTE_FLOW_ITEM_TYPE_VOID:
6943                         break;
6944                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6945                         ret = flow_dv_validate_item_port_id
6946                                         (dev, items, attr, item_flags, error);
6947                         if (ret < 0)
6948                                 return ret;
6949                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6950                         port_id_item = items;
6951                         break;
6952                 case RTE_FLOW_ITEM_TYPE_ETH:
6953                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6954                                                           true, error);
6955                         if (ret < 0)
6956                                 return ret;
6957                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6958                                              MLX5_FLOW_LAYER_OUTER_L2;
6959                         if (items->mask != NULL && items->spec != NULL) {
6960                                 ether_type =
6961                                         ((const struct rte_flow_item_eth *)
6962                                          items->spec)->type;
6963                                 ether_type &=
6964                                         ((const struct rte_flow_item_eth *)
6965                                          items->mask)->type;
6966                                 ether_type = rte_be_to_cpu_16(ether_type);
6967                         } else {
6968                                 ether_type = 0;
6969                         }
6970                         break;
6971                 case RTE_FLOW_ITEM_TYPE_VLAN:
6972                         ret = flow_dv_validate_item_vlan(items, item_flags,
6973                                                          dev, error);
6974                         if (ret < 0)
6975                                 return ret;
6976                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6977                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6978                         if (items->mask != NULL && items->spec != NULL) {
6979                                 ether_type =
6980                                         ((const struct rte_flow_item_vlan *)
6981                                          items->spec)->inner_type;
6982                                 ether_type &=
6983                                         ((const struct rte_flow_item_vlan *)
6984                                          items->mask)->inner_type;
6985                                 ether_type = rte_be_to_cpu_16(ether_type);
6986                         } else {
6987                                 ether_type = 0;
6988                         }
6989                         /* Store outer VLAN mask for of_push_vlan action. */
6990                         if (!tunnel)
6991                                 vlan_m = items->mask;
6992                         break;
6993                 case RTE_FLOW_ITEM_TYPE_IPV4:
6994                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6995                                                   &item_flags, &tunnel);
6996                         ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
6997                                                          last_item, ether_type,
6998                                                          error);
6999                         if (ret < 0)
7000                                 return ret;
7001                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
7002                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
7003                         if (items->mask != NULL &&
7004                             ((const struct rte_flow_item_ipv4 *)
7005                              items->mask)->hdr.next_proto_id) {
7006                                 next_protocol =
7007                                         ((const struct rte_flow_item_ipv4 *)
7008                                          (items->spec))->hdr.next_proto_id;
7009                                 next_protocol &=
7010                                         ((const struct rte_flow_item_ipv4 *)
7011                                          (items->mask))->hdr.next_proto_id;
7012                         } else {
7013                                 /* Reset for inner layer. */
7014                                 next_protocol = 0xff;
7015                         }
7016                         break;
7017                 case RTE_FLOW_ITEM_TYPE_IPV6:
7018                         mlx5_flow_tunnel_ip_check(items, next_protocol,
7019                                                   &item_flags, &tunnel);
7020                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
7021                                                            last_item,
7022                                                            ether_type,
7023                                                            &nic_ipv6_mask,
7024                                                            error);
7025                         if (ret < 0)
7026                                 return ret;
7027                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
7028                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
7029                         if (items->mask != NULL &&
7030                             ((const struct rte_flow_item_ipv6 *)
7031                              items->mask)->hdr.proto) {
7032                                 item_ipv6_proto =
7033                                         ((const struct rte_flow_item_ipv6 *)
7034                                          items->spec)->hdr.proto;
7035                                 next_protocol =
7036                                         ((const struct rte_flow_item_ipv6 *)
7037                                          items->spec)->hdr.proto;
7038                                 next_protocol &=
7039                                         ((const struct rte_flow_item_ipv6 *)
7040                                          items->mask)->hdr.proto;
7041                         } else {
7042                                 /* Reset for inner layer. */
7043                                 next_protocol = 0xff;
7044                         }
7045                         break;
7046                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
7047                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
7048                                                                   item_flags,
7049                                                                   error);
7050                         if (ret < 0)
7051                                 return ret;
7052                         last_item = tunnel ?
7053                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
7054                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
7055                         if (items->mask != NULL &&
7056                             ((const struct rte_flow_item_ipv6_frag_ext *)
7057                              items->mask)->hdr.next_header) {
7058                                 next_protocol =
7059                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7060                                  items->spec)->hdr.next_header;
7061                                 next_protocol &=
7062                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7063                                  items->mask)->hdr.next_header;
7064                         } else {
7065                                 /* Reset for inner layer. */
7066                                 next_protocol = 0xff;
7067                         }
7068                         break;
7069                 case RTE_FLOW_ITEM_TYPE_TCP:
7070                         ret = mlx5_flow_validate_item_tcp
7071                                                 (items, item_flags,
7072                                                  next_protocol,
7073                                                  &nic_tcp_mask,
7074                                                  error);
7075                         if (ret < 0)
7076                                 return ret;
7077                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7078                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
7079                         break;
7080                 case RTE_FLOW_ITEM_TYPE_UDP:
7081                         ret = mlx5_flow_validate_item_udp(items, item_flags,
7082                                                           next_protocol,
7083                                                           error);
7084                         const struct rte_flow_item_udp *spec = items->spec;
7085                         const struct rte_flow_item_udp *mask = items->mask;
7086                         if (!mask)
7087                                 mask = &rte_flow_item_udp_mask;
7088                         if (spec != NULL)
7089                                 udp_dport = rte_be_to_cpu_16
7090                                                 (spec->hdr.dst_port &
7091                                                  mask->hdr.dst_port);
7092                         if (ret < 0)
7093                                 return ret;
7094                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7095                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
7096                         break;
7097                 case RTE_FLOW_ITEM_TYPE_GRE:
7098                         ret = mlx5_flow_validate_item_gre(items, item_flags,
7099                                                           next_protocol, error);
7100                         if (ret < 0)
7101                                 return ret;
7102                         gre_item = items;
7103                         last_item = MLX5_FLOW_LAYER_GRE;
7104                         break;
7105                 case RTE_FLOW_ITEM_TYPE_NVGRE:
7106                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
7107                                                             next_protocol,
7108                                                             error);
7109                         if (ret < 0)
7110                                 return ret;
7111                         last_item = MLX5_FLOW_LAYER_NVGRE;
7112                         break;
7113                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7114                         ret = mlx5_flow_validate_item_gre_key
7115                                 (items, item_flags, gre_item, error);
7116                         if (ret < 0)
7117                                 return ret;
7118                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
7119                         break;
7120                 case RTE_FLOW_ITEM_TYPE_VXLAN:
7121                         ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
7122                                                             items, item_flags,
7123                                                             attr, error);
7124                         if (ret < 0)
7125                                 return ret;
7126                         last_item = MLX5_FLOW_LAYER_VXLAN;
7127                         break;
7128                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7129                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
7130                                                                 item_flags, dev,
7131                                                                 error);
7132                         if (ret < 0)
7133                                 return ret;
7134                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7135                         break;
7136                 case RTE_FLOW_ITEM_TYPE_GENEVE:
7137                         ret = mlx5_flow_validate_item_geneve(items,
7138                                                              item_flags, dev,
7139                                                              error);
7140                         if (ret < 0)
7141                                 return ret;
7142                         geneve_item = items;
7143                         last_item = MLX5_FLOW_LAYER_GENEVE;
7144                         break;
7145                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
7146                         ret = mlx5_flow_validate_item_geneve_opt(items,
7147                                                                  last_item,
7148                                                                  geneve_item,
7149                                                                  dev,
7150                                                                  error);
7151                         if (ret < 0)
7152                                 return ret;
7153                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
7154                         break;
7155                 case RTE_FLOW_ITEM_TYPE_MPLS:
7156                         ret = mlx5_flow_validate_item_mpls(dev, items,
7157                                                            item_flags,
7158                                                            last_item, error);
7159                         if (ret < 0)
7160                                 return ret;
7161                         last_item = MLX5_FLOW_LAYER_MPLS;
7162                         break;
7163
7164                 case RTE_FLOW_ITEM_TYPE_MARK:
7165                         ret = flow_dv_validate_item_mark(dev, items, attr,
7166                                                          error);
7167                         if (ret < 0)
7168                                 return ret;
7169                         last_item = MLX5_FLOW_ITEM_MARK;
7170                         break;
7171                 case RTE_FLOW_ITEM_TYPE_META:
7172                         ret = flow_dv_validate_item_meta(dev, items, attr,
7173                                                          error);
7174                         if (ret < 0)
7175                                 return ret;
7176                         last_item = MLX5_FLOW_ITEM_METADATA;
7177                         break;
7178                 case RTE_FLOW_ITEM_TYPE_ICMP:
7179                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
7180                                                            next_protocol,
7181                                                            error);
7182                         if (ret < 0)
7183                                 return ret;
7184                         last_item = MLX5_FLOW_LAYER_ICMP;
7185                         break;
7186                 case RTE_FLOW_ITEM_TYPE_ICMP6:
7187                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
7188                                                             next_protocol,
7189                                                             error);
7190                         if (ret < 0)
7191                                 return ret;
7192                         item_ipv6_proto = IPPROTO_ICMPV6;
7193                         last_item = MLX5_FLOW_LAYER_ICMP6;
7194                         break;
7195                 case RTE_FLOW_ITEM_TYPE_TAG:
7196                         ret = flow_dv_validate_item_tag(dev, items,
7197                                                         attr, error);
7198                         if (ret < 0)
7199                                 return ret;
7200                         last_item = MLX5_FLOW_ITEM_TAG;
7201                         break;
7202                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7203                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7204                         break;
7205                 case RTE_FLOW_ITEM_TYPE_GTP:
7206                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7207                                                         error);
7208                         if (ret < 0)
7209                                 return ret;
7210                         gtp_item = items;
7211                         last_item = MLX5_FLOW_LAYER_GTP;
7212                         break;
7213                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7214                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
7215                                                             gtp_item, attr,
7216                                                             error);
7217                         if (ret < 0)
7218                                 return ret;
7219                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
7220                         break;
7221                 case RTE_FLOW_ITEM_TYPE_ECPRI:
7222                         /* Capacity will be checked in the translate stage. */
7223                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7224                                                             last_item,
7225                                                             ether_type,
7226                                                             &nic_ecpri_mask,
7227                                                             error);
7228                         if (ret < 0)
7229                                 return ret;
7230                         last_item = MLX5_FLOW_LAYER_ECPRI;
7231                         break;
7232                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7233                         ret = flow_dv_validate_item_integrity(dev, items,
7234                                                               item_flags,
7235                                                               &last_item,
7236                                                               integrity_items,
7237                                                               error);
7238                         if (ret < 0)
7239                                 return ret;
7240                         break;
7241                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7242                         ret = flow_dv_validate_item_aso_ct(dev, items,
7243                                                            &item_flags, error);
7244                         if (ret < 0)
7245                                 return ret;
7246                         break;
7247                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7248                         /* The tunnel offload item was processed earlier;
7249                          * list it here as a supported type.
7250                          */
7251                         break;
7252                 case RTE_FLOW_ITEM_TYPE_FLEX:
7253                         ret = flow_dv_validate_item_flex(dev, items, item_flags,
7254                                                          &last_item,
7255                                                          tunnel != 0, error);
7256                         if (ret < 0)
7257                                 return ret;
7258                         break;
7259                 default:
7260                         return rte_flow_error_set(error, ENOTSUP,
7261                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7262                                                   NULL, "item not supported");
7263                 }
7264                 item_flags |= last_item;
7265         }
7266         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
7267                 ret = flow_dv_validate_item_integrity_post(integrity_items,
7268                                                            item_flags, error);
7269                 if (ret)
7270                         return ret;
7271         }
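        /*
         * Note: the item loop above ORs every matched layer into item_flags.
         * Integrity items get a second, post-loop pass because resolving
         * their inner/outer scope requires the complete layer mask, which is
         * only known once all items have been walked.
         */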
7272         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7273                 int type = actions->type;
7274                 bool shared_count = false;
7275
7276                 if (!mlx5_flow_os_action_supported(type))
7277                         return rte_flow_error_set(error, ENOTSUP,
7278                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7279                                                   actions,
7280                                                   "action not supported");
7281                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7282                         return rte_flow_error_set(error, ENOTSUP,
7283                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7284                                                   actions, "too many actions");
7285                 if (action_flags &
7286                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7287                         return rte_flow_error_set(error, ENOTSUP,
7288                                 RTE_FLOW_ERROR_TYPE_ACTION,
7289                                 NULL, "meter action with policy "
7290                                 "must be the last action");
7291                 switch (type) {
7292                 case RTE_FLOW_ACTION_TYPE_VOID:
7293                         break;
7294                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7295                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
7296                         ret = flow_dv_validate_action_port_id(dev,
7297                                                               action_flags,
7298                                                               actions,
7299                                                               attr,
7300                                                               error);
7301                         if (ret)
7302                                 return ret;
7303                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7304                         ++actions_n;
7305                         break;
7306                 case RTE_FLOW_ACTION_TYPE_FLAG:
7307                         ret = flow_dv_validate_action_flag(dev, action_flags,
7308                                                            attr, error);
7309                         if (ret < 0)
7310                                 return ret;
7311                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7312                                 /* Count all modify-header actions as one. */
7313                                 if (!(action_flags &
7314                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7315                                         ++actions_n;
7316                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7317                                                 MLX5_FLOW_ACTION_MARK_EXT;
7318                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7319                                         modify_after_mirror = 1;
7320
7321                         } else {
7322                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7323                                 ++actions_n;
7324                         }
7325                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7326                         break;
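                /*
                 * Illustration (a sketch, not driver logic): with extended
                 * metadata (dv_xmeta_en != LEGACY) FLAG is implemented as a
                 * modify-header write, so for
                 *
                 *   actions = FLAG, QUEUE
                 *
                 * actions_n becomes 2 (one combined modify-header slot plus
                 * the queue) and rw_act_num grows by MLX5_ACT_NUM_SET_MARK;
                 * a further modify-header action would bump only rw_act_num.
                 */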
7327                 case RTE_FLOW_ACTION_TYPE_MARK:
7328                         ret = flow_dv_validate_action_mark(dev, actions,
7329                                                            action_flags,
7330                                                            attr, error);
7331                         if (ret < 0)
7332                                 return ret;
7333                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7334                                 /* Count all modify-header actions as one. */
7335                                 if (!(action_flags &
7336                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7337                                         ++actions_n;
7338                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7339                                                 MLX5_FLOW_ACTION_MARK_EXT;
7340                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7341                                         modify_after_mirror = 1;
7342                         } else {
7343                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7344                                 ++actions_n;
7345                         }
7346                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7347                         break;
7348                 case RTE_FLOW_ACTION_TYPE_SET_META:
7349                         ret = flow_dv_validate_action_set_meta(dev, actions,
7350                                                                action_flags,
7351                                                                attr, error);
7352                         if (ret < 0)
7353                                 return ret;
7354                         /* Count all modify-header actions as one action. */
7355                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7356                                 ++actions_n;
7357                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7358                                 modify_after_mirror = 1;
7359                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7360                         rw_act_num += MLX5_ACT_NUM_SET_META;
7361                         break;
7362                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7363                         ret = flow_dv_validate_action_set_tag(dev, actions,
7364                                                               action_flags,
7365                                                               attr, error);
7366                         if (ret < 0)
7367                                 return ret;
7368                         /* Count all modify-header actions as one action. */
7369                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7370                                 ++actions_n;
7371                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7372                                 modify_after_mirror = 1;
7373                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7374                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7375                         break;
7376                 case RTE_FLOW_ACTION_TYPE_DROP:
7377                         ret = mlx5_flow_validate_action_drop(action_flags,
7378                                                              attr, error);
7379                         if (ret < 0)
7380                                 return ret;
7381                         action_flags |= MLX5_FLOW_ACTION_DROP;
7382                         ++actions_n;
7383                         break;
7384                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7385                         ret = mlx5_flow_validate_action_queue(actions,
7386                                                               action_flags, dev,
7387                                                               attr, error);
7388                         if (ret < 0)
7389                                 return ret;
7390                         queue_index = ((const struct rte_flow_action_queue *)
7391                                                         (actions->conf))->index;
7392                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7393                         ++actions_n;
7394                         break;
7395                 case RTE_FLOW_ACTION_TYPE_RSS:
7396                         rss = actions->conf;
7397                         ret = mlx5_flow_validate_action_rss(actions,
7398                                                             action_flags, dev,
7399                                                             attr, item_flags,
7400                                                             error);
7401                         if (ret < 0)
7402                                 return ret;
7403                         if (rss && sample_rss &&
7404                             (sample_rss->level != rss->level ||
7405                             sample_rss->types != rss->types))
7406                                 return rte_flow_error_set(error, ENOTSUP,
7407                                         RTE_FLOW_ERROR_TYPE_ACTION,
7408                                         NULL,
7409                                         "Cannot use different RSS types "
7410                                         "or levels in the same flow");
7411                         if (rss != NULL && rss->queue_num)
7412                                 queue_index = rss->queue[0];
7413                         action_flags |= MLX5_FLOW_ACTION_RSS;
7414                         ++actions_n;
7415                         break;
7416                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7417                         ret = mlx5_flow_validate_action_default_miss
7418                                                 (action_flags, attr,
7419                                                  error);
7420                         if (ret < 0)
7421                                 return ret;
7422                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7423                         ++actions_n;
7424                         break;
7425                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7426                         shared_count = true;
7427                         /* fall-through. */
7428                 case RTE_FLOW_ACTION_TYPE_COUNT:
7429                         ret = flow_dv_validate_action_count(dev, shared_count,
7430                                                             action_flags,
7431                                                             error);
7432                         if (ret < 0)
7433                                 return ret;
7434                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7435                         ++actions_n;
7436                         break;
7437                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7438                         if (flow_dv_validate_action_pop_vlan(dev,
7439                                                              action_flags,
7440                                                              actions,
7441                                                              item_flags, attr,
7442                                                              error))
7443                                 return -rte_errno;
7444                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7445                                 modify_after_mirror = 1;
7446                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7447                         ++actions_n;
7448                         break;
7449                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7450                         ret = flow_dv_validate_action_push_vlan(dev,
7451                                                                 action_flags,
7452                                                                 vlan_m,
7453                                                                 actions, attr,
7454                                                                 error);
7455                         if (ret < 0)
7456                                 return ret;
7457                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7458                                 modify_after_mirror = 1;
7459                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7460                         ++actions_n;
7461                         break;
7462                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7463                         ret = flow_dv_validate_action_set_vlan_pcp
7464                                                 (action_flags, actions, error);
7465                         if (ret < 0)
7466                                 return ret;
7467                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7468                                 modify_after_mirror = 1;
7469                         /* PCP is counted together with the push_vlan action. */
7470                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7471                         break;
7472                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7473                         ret = flow_dv_validate_action_set_vlan_vid
7474                                                 (item_flags, action_flags,
7475                                                  actions, error);
7476                         if (ret < 0)
7477                                 return ret;
7478                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7479                                 modify_after_mirror = 1;
7480                         /* VID is counted together with the push_vlan action. */
7481                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7482                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7483                         break;
7484                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7485                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7486                         ret = flow_dv_validate_action_l2_encap(dev,
7487                                                                action_flags,
7488                                                                actions, attr,
7489                                                                error);
7490                         if (ret < 0)
7491                                 return ret;
7492                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7493                         ++actions_n;
7494                         break;
7495                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7496                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7497                         ret = flow_dv_validate_action_decap(dev, action_flags,
7498                                                             actions, item_flags,
7499                                                             attr, error);
7500                         if (ret < 0)
7501                                 return ret;
7502                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7503                                 modify_after_mirror = 1;
7504                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7505                         ++actions_n;
7506                         break;
7507                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7508                         ret = flow_dv_validate_action_raw_encap_decap
7509                                 (dev, NULL, actions->conf, attr, &action_flags,
7510                                  &actions_n, actions, item_flags, error);
7511                         if (ret < 0)
7512                                 return ret;
7513                         break;
7514                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7515                         decap = actions->conf;
7516                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7517                                 ;
7518                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7519                                 encap = NULL;
7520                                 actions--;
7521                         } else {
7522                                 encap = actions->conf;
7523                         }
7524                         ret = flow_dv_validate_action_raw_encap_decap
7525                                            (dev,
7526                                             decap ? decap : &empty_decap, encap,
7527                                             attr, &action_flags, &actions_n,
7528                                             actions, item_flags, error);
7529                         if (ret < 0)
7530                                 return ret;
7531                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7532                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7533                                 modify_after_mirror = 1;
7534                         break;
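                /*
                 * Note on the look-ahead above: a RAW_DECAP followed, VOIDs
                 * aside, by a RAW_ENCAP is validated as one combined
                 * decap/encap rewrite; otherwise the iterator is stepped back
                 * so the standalone decap keeps its place in the list.
                 */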
7535                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7536                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7537                         ret = flow_dv_validate_action_modify_mac(action_flags,
7538                                                                  actions,
7539                                                                  item_flags,
7540                                                                  error);
7541                         if (ret < 0)
7542                                 return ret;
7543                         /* Count all modify-header actions as one action. */
7544                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7545                                 ++actions_n;
7546                         action_flags |= actions->type ==
7547                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7548                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7549                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7550                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7551                                 modify_after_mirror = 1;
7552                         /*
7553                          * Even though the source and destination MAC address
7554                          * fields overlap 4B-aligned words in the header, the
7555                          * convert function handles them separately, so 4 SW
7556                          * actions are created. Two actions are added for each
7557                          * address no matter how many of its bytes are set.
7558                          */
7559                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7560                         break;
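                /*
                 * Worked example (a sketch, not driver logic): rewriting both
                 * MAC addresses in one rule counts once in actions_n but
                 * twice in the modify-header budget:
                 *
                 *   actions = SET_MAC_SRC, SET_MAC_DST, QUEUE
                 *   actions_n  -> 2 (one modify-header slot + queue)
                 *   rw_act_num -> 2 * MLX5_ACT_NUM_MDF_MAC
                 */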
7561                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7562                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7563                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7564                                                                   actions,
7565                                                                   item_flags,
7566                                                                   error);
7567                         if (ret < 0)
7568                                 return ret;
7569                         /* Count all modify-header actions as one action. */
7570                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7571                                 ++actions_n;
7572                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7573                                 modify_after_mirror = 1;
7574                         action_flags |= actions->type ==
7575                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7576                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7577                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7578                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7579                         break;
7580                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7581                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7582                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7583                                                                   actions,
7584                                                                   item_flags,
7585                                                                   error);
7586                         if (ret < 0)
7587                                 return ret;
7588                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7589                                 return rte_flow_error_set(error, ENOTSUP,
7590                                         RTE_FLOW_ERROR_TYPE_ACTION,
7591                                         actions,
7592                                         "Cannot modify the header "
7593                                         "when the protocol is ICMPv6");
7594                         /* Count all modify-header actions as one action. */
7595                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7596                                 ++actions_n;
7597                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7598                                 modify_after_mirror = 1;
7599                         action_flags |= actions->type ==
7600                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7601                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7602                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7603                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7604                         break;
7605                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7606                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7607                         ret = flow_dv_validate_action_modify_tp(action_flags,
7608                                                                 actions,
7609                                                                 item_flags,
7610                                                                 error);
7611                         if (ret < 0)
7612                                 return ret;
7613                         /* Count all modify-header actions as one action. */
7614                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7615                                 ++actions_n;
7616                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7617                                 modify_after_mirror = 1;
7618                         action_flags |= actions->type ==
7619                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7620                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7621                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7622                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7623                         break;
7624                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7625                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7626                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7627                                                                  actions,
7628                                                                  item_flags,
7629                                                                  error);
7630                         if (ret < 0)
7631                                 return ret;
7632                         /* Count all modify-header actions as one action. */
7633                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7634                                 ++actions_n;
7635                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7636                                 modify_after_mirror = 1;
7637                         action_flags |= actions->type ==
7638                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7639                                                 MLX5_FLOW_ACTION_SET_TTL :
7640                                                 MLX5_FLOW_ACTION_DEC_TTL;
7641                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7642                         break;
7643                 case RTE_FLOW_ACTION_TYPE_JUMP:
7644                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7645                                                            action_flags,
7646                                                            attr, external,
7647                                                            error);
7648                         if (ret)
7649                                 return ret;
7650                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7651                             fdb_mirror_limit)
7652                                 return rte_flow_error_set(error, EINVAL,
7653                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7654                                                   NULL,
7655                                                   "sample and jump action combination is not supported");
7656                         ++actions_n;
7657                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7658                         break;
7659                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7660                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7661                         ret = flow_dv_validate_action_modify_tcp_seq
7662                                                                 (action_flags,
7663                                                                  actions,
7664                                                                  item_flags,
7665                                                                  error);
7666                         if (ret < 0)
7667                                 return ret;
7668                         /* Count all modify-header actions as one action. */
7669                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7670                                 ++actions_n;
7671                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7672                                 modify_after_mirror = 1;
7673                         action_flags |= actions->type ==
7674                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7675                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7676                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7677                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7678                         break;
7679                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7680                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7681                         ret = flow_dv_validate_action_modify_tcp_ack
7682                                                                 (action_flags,
7683                                                                  actions,
7684                                                                  item_flags,
7685                                                                  error);
7686                         if (ret < 0)
7687                                 return ret;
7688                         /* Count all modify-header actions as one action. */
7689                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7690                                 ++actions_n;
7691                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7692                                 modify_after_mirror = 1;
7693                         action_flags |= actions->type ==
7694                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7695                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7696                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7697                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7698                         break;
7699                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7700                         break;
7701                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7702                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7703                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7704                         break;
7705                 case RTE_FLOW_ACTION_TYPE_METER:
7706                         ret = mlx5_flow_validate_action_meter(dev,
7707                                                               action_flags,
7708                                                               item_flags,
7709                                                               actions, attr,
7710                                                               port_id_item,
7711                                                               &def_policy,
7712                                                               error);
7713                         if (ret < 0)
7714                                 return ret;
7715                         action_flags |= MLX5_FLOW_ACTION_METER;
7716                         if (!def_policy)
7717                                 action_flags |=
7718                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7719                         ++actions_n;
7720                         /* Meter action will add one more TAG action. */
7721                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7722                         break;
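                /*
                 * Note: the extra MLX5_ACT_NUM_SET_TAG credit accounts for
                 * the implicit flow-id TAG a meter adds. The terminated
                 * policy flag raised here is tested at the top of the loop,
                 * so such a meter is rejected unless it is the last action.
                 */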
7723                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7724                         if (!attr->transfer && !attr->group)
7725                                 return rte_flow_error_set(error, ENOTSUP,
7726                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7727                                                 NULL, "Shared ASO age action "
7728                                                 "is not supported for group 0");
7729                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7730                                 return rte_flow_error_set
7731                                                   (error, EINVAL,
7732                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7733                                                    NULL,
7734                                                    "duplicate age actions set");
7735                         action_flags |= MLX5_FLOW_ACTION_AGE;
7736                         ++actions_n;
7737                         break;
7738                 case RTE_FLOW_ACTION_TYPE_AGE:
7739                         ret = flow_dv_validate_action_age(action_flags,
7740                                                           actions, dev,
7741                                                           error);
7742                         if (ret < 0)
7743                                 return ret;
7744                         /*
7745                          * Validate that the regular AGE action (counter based)
7746                          * is mutually exclusive with shared counter actions.
7747                          */
7748                         if (!priv->sh->flow_hit_aso_en) {
7749                                 if (shared_count)
7750                                         return rte_flow_error_set
7751                                                 (error, EINVAL,
7752                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7753                                                 NULL,
7754                                                 "old age and shared count combination is not supported");
7755                                 if (sample_count)
7756                                         return rte_flow_error_set
7757                                                 (error, EINVAL,
7758                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7759                                                 NULL,
7760                                                 "old age action and count must be in the same sub flow");
7761                         }
7762                         action_flags |= MLX5_FLOW_ACTION_AGE;
7763                         ++actions_n;
7764                         break;
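                /*
                 * Note: without ASO flow-hit support (flow_hit_aso_en == 0)
                 * AGE falls back to a counter-based implementation, which is
                 * why it may not be combined with a shared counter and, when
                 * sampling splits the flow, must live in the same sub-flow
                 * as the count action.
                 */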
7765                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7766                         ret = flow_dv_validate_action_modify_ipv4_dscp
7767                                                          (action_flags,
7768                                                           actions,
7769                                                           item_flags,
7770                                                           error);
7771                         if (ret < 0)
7772                                 return ret;
7773                         /* Count all modify-header actions as one action. */
7774                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7775                                 ++actions_n;
7776                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7777                                 modify_after_mirror = 1;
7778                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7779                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7780                         break;
7781                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7782                         ret = flow_dv_validate_action_modify_ipv6_dscp
7783                                                                 (action_flags,
7784                                                                  actions,
7785                                                                  item_flags,
7786                                                                  error);
7787                         if (ret < 0)
7788                                 return ret;
7789                         /* Count all modify-header actions as one action. */
7790                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7791                                 ++actions_n;
7792                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7793                                 modify_after_mirror = 1;
7794                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7795                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7796                         break;
7797                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7798                         ret = flow_dv_validate_action_sample(&action_flags,
7799                                                              actions, dev,
7800                                                              attr, item_flags,
7801                                                              rss, &sample_rss,
7802                                                              &sample_count,
7803                                                              &fdb_mirror_limit,
7804                                                              error);
7805                         if (ret < 0)
7806                                 return ret;
7807                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7808                         ++actions_n;
7809                         break;
7810                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7811                         ret = flow_dv_validate_action_modify_field(dev,
7812                                                                    action_flags,
7813                                                                    actions,
7814                                                                    attr,
7815                                                                    error);
7816                         if (ret < 0)
7817                                 return ret;
7818                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7819                                 modify_after_mirror = 1;
7820                         /* Count all modify-header actions as one action. */
7821                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7822                                 ++actions_n;
7823                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7824                         rw_act_num += ret;
7825                         break;
7826                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7827                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7828                                                              item_flags, attr,
7829                                                              error);
7830                         if (ret < 0)
7831                                 return ret;
7832                         action_flags |= MLX5_FLOW_ACTION_CT;
7833                         break;
7834                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7835                         /* The tunnel offload action was processed earlier;
7836                          * list it here as a supported type.
7837                          */
7838                         break;
7839                 default:
7840                         return rte_flow_error_set(error, ENOTSUP,
7841                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7842                                                   actions,
7843                                                   "action not supported");
7844                 }
7845         }
7846         /*
7847          * Validate actions in tunnel offload flow rules:
7848          * - An explicit decap action is prohibited by the tunnel offload API.
7849          * - A drop action in a tunnel steer rule is prohibited by the API.
7850          * - The application cannot use the MARK action because its value can
7851          *   mask the tunnel default miss notification.
7852          * - JUMP in a tunnel match rule is not supported by the current PMD
7853          *   implementation.
7854          * - TAG & META are reserved for future use.
7855          */
7856         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7857                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7858                                             MLX5_FLOW_ACTION_MARK     |
7859                                             MLX5_FLOW_ACTION_SET_TAG  |
7860                                             MLX5_FLOW_ACTION_SET_META |
7861                                             MLX5_FLOW_ACTION_DROP;
7862
7863                 if (action_flags & bad_actions_mask)
7864                         return rte_flow_error_set
7865                                         (error, EINVAL,
7866                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7867                                         "Invalid RTE action in tunnel "
7868                                         "set decap rule");
7869                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7870                         return rte_flow_error_set
7871                                         (error, EINVAL,
7872                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7873                                         "tunnel set decap rule must terminate "
7874                                         "with JUMP");
7875                 if (!attr->ingress)
7876                         return rte_flow_error_set
7877                                         (error, EINVAL,
7878                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7879                                         "tunnel flows for ingress traffic only");
7880         }
7881         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7882                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7883                                             MLX5_FLOW_ACTION_MARK    |
7884                                             MLX5_FLOW_ACTION_SET_TAG |
7885                                             MLX5_FLOW_ACTION_SET_META;
7886
7887                 if (action_flags & bad_actions_mask)
7888                         return rte_flow_error_set
7889                                         (error, EINVAL,
7890                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7891                                         "Invalid RTE action in tunnel "
7892                                         "set match rule");
7893         }
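        /*
         * Summary of the two tunnel offload checks above: a tunnel set
         * (steer) rule rejects explicit decap, drop, MARK, SET_TAG and
         * SET_META, must terminate with JUMP and applies to ingress only;
         * a tunnel match rule rejects the same metadata actions and JUMP
         * as well.
         */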
7894         /*
7895          * Validate the drop action mutual exclusion with other actions.
7896          * Drop action is mutually-exclusive with any other action, except for
7897          * Count action.
7898          * Drop action compatibility with tunnel offload was already validated.
7899          */
7900         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7901                             MLX5_FLOW_ACTION_TUNNEL_SET));
7902         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7903             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7904                 return rte_flow_error_set(error, EINVAL,
7905                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7906                                           "Drop action is mutually-exclusive "
7907                                           "with any other action, except for "
7908                                           "Count action");
7909         /* E-Switch has a few restrictions on using items and actions. */
7910         if (attr->transfer) {
7911                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7912                     action_flags & MLX5_FLOW_ACTION_FLAG)
7913                         return rte_flow_error_set(error, ENOTSUP,
7914                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7915                                                   NULL,
7916                                                   "unsupported action FLAG");
7917                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7918                     action_flags & MLX5_FLOW_ACTION_MARK)
7919                         return rte_flow_error_set(error, ENOTSUP,
7920                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7921                                                   NULL,
7922                                                   "unsupported action MARK");
7923                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7924                         return rte_flow_error_set(error, ENOTSUP,
7925                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7926                                                   NULL,
7927                                                   "unsupported action QUEUE");
7928                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7929                         return rte_flow_error_set(error, ENOTSUP,
7930                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7931                                                   NULL,
7932                                                   "unsupported action RSS");
7933                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7934                         return rte_flow_error_set(error, EINVAL,
7935                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7936                                                   actions,
7937                                                   "no fate action is found");
7938         } else {
7939                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7940                         return rte_flow_error_set(error, EINVAL,
7941                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7942                                                   actions,
7943                                                   "no fate action is found");
7944         }
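        /*
         * Note: "fate" actions are the terminating ones collected in the
         * MLX5_FLOW_FATE_* masks (queue, RSS, drop, jump and the like).
         * Transfer rules must carry one from the E-Switch fate mask; NIC
         * rules need a fate action only on the ingress path, so an egress
         * rule without one passes this check.
         */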
7945         /*
7946          * Continue validation for Xcap and VLAN actions.
7947          * If hairpin works in explicit TX rule mode, there is no action
7948          * splitting and the validation of hairpin ingress flows is the
7949          * same as for other standard flows.
7950          */
7951         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7952                              MLX5_FLOW_VLAN_ACTIONS)) &&
7953             (queue_index == 0xFFFF ||
7954              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7955              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7956              conf->tx_explicit != 0))) {
7957                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7958                     MLX5_FLOW_XCAP_ACTIONS)
7959                         return rte_flow_error_set(error, ENOTSUP,
7960                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7961                                                   NULL, "encap and decap "
7962                                                   "combination is not supported");
7963                 if (!attr->transfer && attr->ingress) {
7964                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7965                                 return rte_flow_error_set
7966                                                 (error, ENOTSUP,
7967                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7968                                                  NULL, "encap is not supported"
7969                                                  " for ingress traffic");
7970                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7971                                 return rte_flow_error_set
7972                                                 (error, ENOTSUP,
7973                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7974                                                  NULL, "push VLAN action not "
7975                                                  "supported for ingress");
7976                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7977                                         MLX5_FLOW_VLAN_ACTIONS)
7978                                 return rte_flow_error_set
7979                                                 (error, ENOTSUP,
7980                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7981                                                  NULL, "no support for "
7982                                                  "multiple VLAN actions");
7983                 }
7984         }
7985         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7986                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7987                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7988                         attr->ingress)
7989                         return rte_flow_error_set
7990                                 (error, ENOTSUP,
7991                                 RTE_FLOW_ERROR_TYPE_ACTION,
7992                                 NULL, "fate action not supported for "
7993                                 "meter with policy");
7994                 if (attr->egress) {
7995                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7996                                 return rte_flow_error_set
7997                                         (error, ENOTSUP,
7998                                         RTE_FLOW_ERROR_TYPE_ACTION,
7999                                         NULL, "modify header action in egress "
8000                                         "cannot be done before meter action");
8001                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
8002                                 return rte_flow_error_set
8003                                         (error, ENOTSUP,
8004                                         RTE_FLOW_ERROR_TYPE_ACTION,
8005                                         NULL, "encap action in egress "
8006                                         "cannot be done before meter action");
8007                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
8008                                 return rte_flow_error_set
8009                                         (error, ENOTSUP,
8010                                         RTE_FLOW_ERROR_TYPE_ACTION,
8011                                         NULL, "push vlan action in egress "
8012                                         "cannot be done before meter action");
8013                 }
8014         }
8015         /*
8016          * Hairpin flow will add one more TAG action in TX implicit mode.
8017          * In TX explicit mode, there will be no hairpin flow ID.
8018          */
8019         if (hairpin > 0)
8020                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8021         /* Extra metadata enabled: one more TAG action will be added. */
8022         if (dev_conf->dv_flow_en &&
8023             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
8024             mlx5_flow_ext_mreg_supported(dev))
8025                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8026         if (rw_act_num >
8027             flow_dv_modify_hdr_action_max(dev, is_root)) {
8028                 return rte_flow_error_set(error, ENOTSUP,
8029                                           RTE_FLOW_ERROR_TYPE_ACTION,
8030                                           NULL, "too many header modify"
8031                                           " actions to support");
8032         }
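        /*
         * Sketch of the budget just checked, assuming each MLX5_ACT_NUM_*
         * constant is the number of low-level header-rewrite entries the
         * action expands to:
         *
         *   rw_act_num = sum of MLX5_ACT_NUM_* over all actions
         *                + implicit TAG rewrites (hairpin, extra metadata)
         *   limit      = flow_dv_modify_hdr_action_max(dev, is_root)
         *
         * The limit is queried with is_root because root tables typically
         * allow fewer modify-header entries than non-root ones.
         */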
8033         /* E-Switch egress mirror-and-modify flows have a limitation on CX5. */
8034         if (fdb_mirror_limit && modify_after_mirror)
8035                 return rte_flow_error_set(error, EINVAL,
8036                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8037                                 "sample before modify action is not supported");
8038         return 0;
8039 }
8040
8041 /**
8042  * Internal preparation function. Allocates the DV flow structure,
8043  * whose size is constant.
8044  *
8045  * @param[in] dev
8046  *   Pointer to the rte_eth_dev structure.
8047  * @param[in] attr
8048  *   Pointer to the flow attributes.
8049  * @param[in] items
8050  *   Pointer to the list of items.
8051  * @param[in] actions
8052  *   Pointer to the list of actions.
8053  * @param[out] error
8054  *   Pointer to the error structure.
8055  *
8056  * @return
8057  *   Pointer to mlx5_flow object on success,
8058  *   otherwise NULL and rte_errno is set.
8059  */
8060 static struct mlx5_flow *
8061 flow_dv_prepare(struct rte_eth_dev *dev,
8062                 const struct rte_flow_attr *attr __rte_unused,
8063                 const struct rte_flow_item items[] __rte_unused,
8064                 const struct rte_flow_action actions[] __rte_unused,
8065                 struct rte_flow_error *error)
8066 {
8067         uint32_t handle_idx = 0;
8068         struct mlx5_flow *dev_flow;
8069         struct mlx5_flow_handle *dev_handle;
8070         struct mlx5_priv *priv = dev->data->dev_private;
8071         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8072
8073         MLX5_ASSERT(wks);
8074         wks->skip_matcher_reg = 0;
8075         wks->policy = NULL;
8076         wks->final_policy = NULL;
8077         /* Guard against overflow of the temporary device flow array. */
8078         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
8079                 rte_flow_error_set(error, ENOSPC,
8080                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8081                                    "no free temporary device flow");
8082                 return NULL;
8083         }
8084         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
8085                                    &handle_idx);
8086         if (!dev_handle) {
8087                 rte_flow_error_set(error, ENOMEM,
8088                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8089                                    "not enough memory to create flow handle");
8090                 return NULL;
8091         }
8092         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
8093         dev_flow = &wks->flows[wks->flow_idx++];
8094         memset(dev_flow, 0, sizeof(*dev_flow));
8095         dev_flow->handle = dev_handle;
8096         dev_flow->handle_idx = handle_idx;
8097         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
8098         dev_flow->ingress = attr->ingress;
8099         dev_flow->dv.transfer = attr->transfer;
8100         return dev_flow;
8101 }
8102
8103 #ifdef RTE_LIBRTE_MLX5_DEBUG
8104 /**
8105  * Sanity check for match mask and value. Similar to check_valid_spec() in
8106  * the kernel driver. If an unmasked bit is set in the value, it fails.
8107  *
8108  * @param match_mask
8109  *   pointer to match mask buffer.
8110  * @param match_value
8111  *   pointer to match value buffer.
8112  *
8113  * @return
8114  *   0 if valid, -EINVAL otherwise.
8115  */
8116 static int
8117 flow_dv_check_valid_spec(void *match_mask, void *match_value)
8118 {
8119         uint8_t *m = match_mask;
8120         uint8_t *v = match_value;
8121         unsigned int i;
8122
8123         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
8124                 if (v[i] & ~m[i]) {
8125                         DRV_LOG(ERR,
8126                                 "match_value differs from match_criteria"
8127                                 " %p[%u] != %p[%u]",
8128                                 match_value, i, match_mask, i);
8129                         return -EINVAL;
8130                 }
8131         }
8132         return 0;
8133 }
8134 #endif
8135
8136 /**
8137  * Add match of ip_version.
8138  *
8139  * @param[in] group
8140  *   Flow group.
8141  * @param[in] headers_v
8142  *   Values header pointer.
8143  * @param[in] headers_m
8144  *   Masks header pointer.
8145  * @param[in] ip_version
8146  *   The IP version to set.
8147  */
8148 static inline void
8149 flow_dv_set_match_ip_version(uint32_t group,
8150                              void *headers_v,
8151                              void *headers_m,
8152                              uint8_t ip_version)
8153 {
8154         if (group == 0)
8155                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8156         else
8157                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8158                          ip_version);
8159         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8160         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8161         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8162 }
8163
8164 /**
8165  * Add Ethernet item to matcher and to the value.
8166  *
8167  * @param[in, out] matcher
8168  *   Flow matcher.
8169  * @param[in, out] key
8170  *   Flow matcher value.
8171  * @param[in] item
8172  *   Flow pattern to translate.
8173  * @param[in] inner
8174  *   Item is inner pattern.
8175  * @param[in] group
 *   The group to insert the rule.
 */
8176 static void
8177 flow_dv_translate_item_eth(void *matcher, void *key,
8178                            const struct rte_flow_item *item, int inner,
8179                            uint32_t group)
8180 {
8181         const struct rte_flow_item_eth *eth_m = item->mask;
8182         const struct rte_flow_item_eth *eth_v = item->spec;
8183         const struct rte_flow_item_eth nic_mask = {
8184                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8185                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8186                 .type = RTE_BE16(0xffff),
8187                 .has_vlan = 0,
8188         };
8189         void *hdrs_m;
8190         void *hdrs_v;
8191         char *l24_v;
8192         unsigned int i;
8193
8194         if (!eth_v)
8195                 return;
8196         if (!eth_m)
8197                 eth_m = &nic_mask;
8198         if (inner) {
8199                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8200                                          inner_headers);
8201                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8202         } else {
8203                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8204                                          outer_headers);
8205                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8206         }
8207         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8208                &eth_m->dst, sizeof(eth_m->dst));
8209         /* The value must be in the range of the mask. */
8210         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8211         for (i = 0; i < sizeof(eth_m->dst); ++i)
8212                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8213         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8214                &eth_m->src, sizeof(eth_m->src));
8215         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8216         /* The value must be in the range of the mask. */
8217         for (i = 0; i < sizeof(eth_m->src); ++i)
8218                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8219         /*
8220          * HW supports match on one Ethertype, the Ethertype following the last
8221          * VLAN tag of the packet (see PRM).
8222          * Set match on ethertype only if ETH header is not followed by VLAN.
8223          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8224          * ethertype, and use ip_version field instead.
8225          * eCPRI over Ether layer will use type value 0xAEFE.
8226          */
8227         if (eth_m->type == 0xFFFF) {
8228                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
8229                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8230                 switch (eth_v->type) {
8231                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8232                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8233                         return;
8234                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8235                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8236                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8237                         return;
8238                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8239                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8240                         return;
8241                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8242                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8243                         return;
8244                 default:
8245                         break;
8246                 }
8247         }
8248         if (eth_m->has_vlan) {
8249                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8250                 if (eth_v->has_vlan) {
8251                         /*
8252                          * Here, when also has_more_vlan field in VLAN item is
8253                          * not set, only single-tagged packets will be matched.
8254                          */
8255                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8256                         return;
8257                 }
8258         }
8259         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8260                  rte_be_to_cpu_16(eth_m->type));
8261         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8262         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8263 }
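
/*
 * A standalone sketch (illustrative helper, not driver code) of the
 * dispatch in flow_dv_translate_item_eth() above: with a fully-masked
 * ethertype, IPv4/IPv6 are matched through the ip_version field rather
 * than through ethertype, per the PRM comment.
 */
static inline int
example_eth_type_to_ip_version(rte_be16_t type)
{
	switch (type) {
	case RTE_BE16(RTE_ETHER_TYPE_IPV4):
		return 4; /* match ip_version == 4, leave ethertype clear */
	case RTE_BE16(RTE_ETHER_TYPE_IPV6):
		return 6; /* match ip_version == 6, leave ethertype clear */
	default:
		return 0; /* fall back to an exact ethertype match */
	}
}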
8264
8265 /**
8266  * Add VLAN item to matcher and to the value.
8267  *
8268  * @param[in, out] dev_flow
8269  *   Flow descriptor.
8270  * @param[in, out] matcher
8271  *   Flow matcher.
8272  * @param[in, out] key
8273  *   Flow matcher value.
8274  * @param[in] item
8275  *   Flow pattern to translate.
8276  * @param[in] inner
8277  *   Item is inner pattern.
8278  * @param[in] group
 *   The group to insert the rule.
 */
8279 static void
8280 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8281                             void *matcher, void *key,
8282                             const struct rte_flow_item *item,
8283                             int inner, uint32_t group)
8284 {
8285         const struct rte_flow_item_vlan *vlan_m = item->mask;
8286         const struct rte_flow_item_vlan *vlan_v = item->spec;
8287         void *hdrs_m;
8288         void *hdrs_v;
8289         uint16_t tci_m;
8290         uint16_t tci_v;
8291
8292         if (inner) {
8293                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8294                                          inner_headers);
8295                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8296         } else {
8297                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8298                                          outer_headers);
8299                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8300                 /*
8301                  * This is a workaround; masks are not supported here
8302                  * and have been pre-validated.
8303                  */
8304                 if (vlan_v)
8305                         dev_flow->handle->vf_vlan.tag =
8306                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8307         }
8308         /*
8309          * When VLAN item exists in flow, mark packet as tagged,
8310          * even if TCI is not specified.
8311          */
8312         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8313                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8314                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8315         }
8316         if (!vlan_v)
8317                 return;
8318         if (!vlan_m)
8319                 vlan_m = &rte_flow_item_vlan_mask;
8320         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8321         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8322         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8323         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8324         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8325         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8326         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8327         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8328         /*
8329          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8330          * ethertype, and use ip_version field instead.
8331          */
8332         if (vlan_m->inner_type == 0xFFFF) {
8333                 switch (vlan_v->inner_type) {
8334                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8335                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8336                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8337                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8338                         return;
8339                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8340                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8341                         return;
8342                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8343                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8344                         return;
8345                 default:
8346                         break;
8347                 }
8348         }
8349         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8350                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8351                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8352                 /* Only one vlan_tag bit can be set. */
8353                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8354                 return;
8355         }
8356         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8357                  rte_be_to_cpu_16(vlan_m->inner_type));
8358         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8359                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8360 }
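
/*
 * The VLAN TCI decomposition used above, restated as a self-contained
 * sketch (hypothetical helper). The 16-bit host-order TCI splits into
 * PCP (first_prio), DEI/CFI (first_cfi) and VID (first_vid).
 */
static inline void
example_tci_split(uint16_t tci, uint16_t *prio, uint16_t *cfi, uint16_t *vid)
{
	*prio = tci >> 13;      /* top 3 bits */
	*cfi = (tci >> 12) & 1; /* next bit */
	*vid = tci & 0x0fff;    /* low 12 bits */
}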
8361
8362 /**
8363  * Add IPV4 item to matcher and to the value.
8364  *
8365  * @param[in, out] matcher
8366  *   Flow matcher.
8367  * @param[in, out] key
8368  *   Flow matcher value.
8369  * @param[in] item
8370  *   Flow pattern to translate.
8371  * @param[in] inner
8372  *   Item is inner pattern.
8373  * @param[in] group
8374  *   The group to insert the rule.
8375  */
8376 static void
8377 flow_dv_translate_item_ipv4(void *matcher, void *key,
8378                             const struct rte_flow_item *item,
8379                             int inner, uint32_t group)
8380 {
8381         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8382         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8383         const struct rte_flow_item_ipv4 nic_mask = {
8384                 .hdr = {
8385                         .src_addr = RTE_BE32(0xffffffff),
8386                         .dst_addr = RTE_BE32(0xffffffff),
8387                         .type_of_service = 0xff,
8388                         .next_proto_id = 0xff,
8389                         .time_to_live = 0xff,
8390                 },
8391         };
8392         void *headers_m;
8393         void *headers_v;
8394         char *l24_m;
8395         char *l24_v;
8396         uint8_t tos, ihl_m, ihl_v;
8397
8398         if (inner) {
8399                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8400                                          inner_headers);
8401                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8402         } else {
8403                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8404                                          outer_headers);
8405                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8406         }
8407         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8408         if (!ipv4_v)
8409                 return;
8410         if (!ipv4_m)
8411                 ipv4_m = &nic_mask;
8412         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8413                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8414         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8415                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8416         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8417         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8418         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8419                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8420         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8421                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8422         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8423         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8424         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8425         ihl_m = ipv4_m->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8426         ihl_v = ipv4_v->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8427         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_ihl, ihl_m);
8428         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, ihl_m & ihl_v);
8429         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8430                  ipv4_m->hdr.type_of_service);
8431         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8432         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8433                  ipv4_m->hdr.type_of_service >> 2);
8434         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8435         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8436                  ipv4_m->hdr.next_proto_id);
8437         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8438                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8439         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8440                  ipv4_m->hdr.time_to_live);
8441         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8442                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8443         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8444                  !!(ipv4_m->hdr.fragment_offset));
8445         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8446                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8447 }
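
/*
 * A sketch (hypothetical helper, not driver code) of the TOS split used
 * above. MLX5_SET() truncates to the field width, so the full
 * type_of_service byte can be written to ip_ecn; explicitly, the byte
 * decomposes as follows.
 */
static inline void
example_tos_split(uint8_t tos, uint8_t *dscp, uint8_t *ecn)
{
	*dscp = tos >> 2;  /* upper 6 bits */
	*ecn = tos & 0x3;  /* lower 2 bits */
}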
8448
8449 /**
8450  * Add IPV6 item to matcher and to the value.
8451  *
8452  * @param[in, out] matcher
8453  *   Flow matcher.
8454  * @param[in, out] key
8455  *   Flow matcher value.
8456  * @param[in] item
8457  *   Flow pattern to translate.
8458  * @param[in] inner
8459  *   Item is inner pattern.
8460  * @param[in] group
8461  *   The group to insert the rule.
8462  */
8463 static void
8464 flow_dv_translate_item_ipv6(void *matcher, void *key,
8465                             const struct rte_flow_item *item,
8466                             int inner, uint32_t group)
8467 {
8468         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8469         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8470         const struct rte_flow_item_ipv6 nic_mask = {
8471                 .hdr = {
8472                         .src_addr =
8473                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8474                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8475                         .dst_addr =
8476                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8477                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8478                         .vtc_flow = RTE_BE32(0xffffffff),
8479                         .proto = 0xff,
8480                         .hop_limits = 0xff,
8481                 },
8482         };
8483         void *headers_m;
8484         void *headers_v;
8485         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8486         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8487         char *l24_m;
8488         char *l24_v;
8489         uint32_t vtc_m;
8490         uint32_t vtc_v;
8491         int i;
8492         int size;
8493
8494         if (inner) {
8495                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8496                                          inner_headers);
8497                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8498         } else {
8499                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8500                                          outer_headers);
8501                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8502         }
8503         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8504         if (!ipv6_v)
8505                 return;
8506         if (!ipv6_m)
8507                 ipv6_m = &nic_mask;
8508         size = sizeof(ipv6_m->hdr.dst_addr);
8509         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8510                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8511         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8512                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8513         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8514         for (i = 0; i < size; ++i)
8515                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8516         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8517                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8518         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8519                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8520         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8521         for (i = 0; i < size; ++i)
8522                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8523         /* TOS. */
8524         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8525         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8526         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8527         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8528         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8529         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8530         /* Label. */
8531         if (inner) {
8532                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8533                          vtc_m);
8534                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8535                          vtc_v);
8536         } else {
8537                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8538                          vtc_m);
8539                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8540                          vtc_v);
8541         }
8542         /* Protocol. */
8543         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8544                  ipv6_m->hdr.proto);
8545         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8546                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8547         /* Hop limit. */
8548         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8549                  ipv6_m->hdr.hop_limits);
8550         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8551                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8552         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8553                  !!(ipv6_m->has_frag_ext));
8554         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8555                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8556 }
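
/*
 * A sketch (hypothetical helper) of the vtc_flow layout relied on above:
 * version(4) | traffic class(8) | flow label(20), with DSCP and ECN being
 * the top 6 and bottom 2 bits of the traffic class.
 */
static inline void
example_vtc_flow_split(uint32_t vtc, uint8_t *dscp, uint8_t *ecn,
		       uint32_t *flow_label)
{
	*dscp = (vtc >> 22) & 0x3f;  /* matches the ">> 22" above */
	*ecn = (vtc >> 20) & 0x3;    /* matches the ">> 20" above */
	*flow_label = vtc & 0xfffff; /* low 20 bits */
}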
8557
8558 /**
8559  * Add IPV6 fragment extension item to matcher and to the value.
8560  *
8561  * @param[in, out] matcher
8562  *   Flow matcher.
8563  * @param[in, out] key
8564  *   Flow matcher value.
8565  * @param[in] item
8566  *   Flow pattern to translate.
8567  * @param[in] inner
8568  *   Item is inner pattern.
8569  */
8570 static void
8571 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8572                                      const struct rte_flow_item *item,
8573                                      int inner)
8574 {
8575         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8576         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8577         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8578                 .hdr = {
8579                         .next_header = 0xff,
8580                         .frag_data = RTE_BE16(0xffff),
8581                 },
8582         };
8583         void *headers_m;
8584         void *headers_v;
8585
8586         if (inner) {
8587                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8588                                          inner_headers);
8589                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8590         } else {
8591                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8592                                          outer_headers);
8593                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8594         }
8595         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8596         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8597         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8598         if (!ipv6_frag_ext_v)
8599                 return;
8600         if (!ipv6_frag_ext_m)
8601                 ipv6_frag_ext_m = &nic_mask;
8602         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8603                  ipv6_frag_ext_m->hdr.next_header);
8604         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8605                  ipv6_frag_ext_v->hdr.next_header &
8606                  ipv6_frag_ext_m->hdr.next_header);
8607 }
8608
8609 /**
8610  * Add TCP item to matcher and to the value.
8611  *
8612  * @param[in, out] matcher
8613  *   Flow matcher.
8614  * @param[in, out] key
8615  *   Flow matcher value.
8616  * @param[in] item
8617  *   Flow pattern to translate.
8618  * @param[in] inner
8619  *   Item is inner pattern.
8620  */
8621 static void
8622 flow_dv_translate_item_tcp(void *matcher, void *key,
8623                            const struct rte_flow_item *item,
8624                            int inner)
8625 {
8626         const struct rte_flow_item_tcp *tcp_m = item->mask;
8627         const struct rte_flow_item_tcp *tcp_v = item->spec;
8628         void *headers_m;
8629         void *headers_v;
8630
8631         if (inner) {
8632                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8633                                          inner_headers);
8634                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8635         } else {
8636                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8637                                          outer_headers);
8638                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8639         }
8640         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8641         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8642         if (!tcp_v)
8643                 return;
8644         if (!tcp_m)
8645                 tcp_m = &rte_flow_item_tcp_mask;
8646         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8647                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8648         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8649                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8650         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8651                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8652         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8653                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8654         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8655                  tcp_m->hdr.tcp_flags);
8656         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8657                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8658 }
8659
8660 /**
8661  * Add UDP item to matcher and to the value.
8662  *
8663  * @param[in, out] matcher
8664  *   Flow matcher.
8665  * @param[in, out] key
8666  *   Flow matcher value.
8667  * @param[in] item
8668  *   Flow pattern to translate.
8669  * @param[in] inner
8670  *   Item is inner pattern.
8671  */
8672 static void
8673 flow_dv_translate_item_udp(void *matcher, void *key,
8674                            const struct rte_flow_item *item,
8675                            int inner)
8676 {
8677         const struct rte_flow_item_udp *udp_m = item->mask;
8678         const struct rte_flow_item_udp *udp_v = item->spec;
8679         void *headers_m;
8680         void *headers_v;
8681
8682         if (inner) {
8683                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8684                                          inner_headers);
8685                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8686         } else {
8687                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8688                                          outer_headers);
8689                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8690         }
8691         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8692         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8693         if (!udp_v)
8694                 return;
8695         if (!udp_m)
8696                 udp_m = &rte_flow_item_udp_mask;
8697         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8698                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8699         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8700                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8701         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8702                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8703         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8704                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8705 }
8706
8707 /**
8708  * Add GRE optional Key item to matcher and to the value.
8709  *
8710  * @param[in, out] matcher
8711  *   Flow matcher.
8712  * @param[in, out] key
8713  *   Flow matcher value.
8714  * @param[in] item
8715  *   Flow pattern to translate.
8718  */
8719 static void
8720 flow_dv_translate_item_gre_key(void *matcher, void *key,
8721                                    const struct rte_flow_item *item)
8722 {
8723         const rte_be32_t *key_m = item->mask;
8724         const rte_be32_t *key_v = item->spec;
8725         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8726         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8727         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8728
8729         /* GRE K bit must be on and should already be validated */
8730         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8731         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8732         if (!key_v)
8733                 return;
8734         if (!key_m)
8735                 key_m = &gre_key_default_mask;
8736         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8737                  rte_be_to_cpu_32(*key_m) >> 8);
8738         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8739                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8740         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8741                  rte_be_to_cpu_32(*key_m) & 0xFF);
8742         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8743                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8744 }
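
/*
 * The 24/8 split performed above, restated (illustrative helper). The PRM
 * keeps the 32-bit GRE key in two fields, gre_key_h (upper 24 bits) and
 * gre_key_l (lower 8 bits); a host-order key splits accordingly.
 */
static inline void
example_gre_key_split(uint32_t key, uint32_t *key_h, uint8_t *key_l)
{
	*key_h = key >> 8;   /* upper 24 bits -> gre_key_h */
	*key_l = key & 0xff; /* lower 8 bits -> gre_key_l */
}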
8745
8746 /**
8747  * Add GRE item to matcher and to the value.
8748  *
8749  * @param[in, out] matcher
8750  *   Flow matcher.
8751  * @param[in, out] key
8752  *   Flow matcher value.
8753  * @param[in] item
8754  *   Flow pattern to translate.
8755  * @param[in] pattern_flags
8756  *   Accumulated pattern flags.
8757  */
8758 static void
8759 flow_dv_translate_item_gre(void *matcher, void *key,
8760                            const struct rte_flow_item *item,
8761                            uint64_t pattern_flags)
8762 {
8763         static const struct rte_flow_item_gre empty_gre = {0,};
8764         const struct rte_flow_item_gre *gre_m = item->mask;
8765         const struct rte_flow_item_gre *gre_v = item->spec;
8766         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
8767         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8768         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8769         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8770         struct {
8771                 union {
8772                         __extension__
8773                         struct {
8774                                 uint16_t version:3;
8775                                 uint16_t rsvd0:9;
8776                                 uint16_t s_present:1;
8777                                 uint16_t k_present:1;
8778                                 uint16_t rsvd_bit1:1;
8779                                 uint16_t c_present:1;
8780                         };
8781                         uint16_t value;
8782                 };
8783         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8784         uint16_t protocol_m, protocol_v;
8785
8786         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8787         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8788         if (!gre_v) {
8789                 gre_v = &empty_gre;
8790                 gre_m = &empty_gre;
8791         } else {
8792                 if (!gre_m)
8793                         gre_m = &rte_flow_item_gre_mask;
8794         }
8795         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8796         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8797         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8798                  gre_crks_rsvd0_ver_m.c_present);
8799         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8800                  gre_crks_rsvd0_ver_v.c_present &
8801                  gre_crks_rsvd0_ver_m.c_present);
8802         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8803                  gre_crks_rsvd0_ver_m.k_present);
8804         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8805                  gre_crks_rsvd0_ver_v.k_present &
8806                  gre_crks_rsvd0_ver_m.k_present);
8807         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8808                  gre_crks_rsvd0_ver_m.s_present);
8809         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8810                  gre_crks_rsvd0_ver_v.s_present &
8811                  gre_crks_rsvd0_ver_m.s_present);
8812         protocol_m = rte_be_to_cpu_16(gre_m->protocol);
8813         protocol_v = rte_be_to_cpu_16(gre_v->protocol);
8814         if (!protocol_m) {
8815                 /* Force next protocol to prevent matcher duplication. */
8816                 protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
8817                 if (protocol_v)
8818                         protocol_m = 0xFFFF;
8819         }
8820         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
8821         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8822                  protocol_m & protocol_v);
8823 }
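
/*
 * A portable restatement (hypothetical helper) of the bitfield overlay
 * above. In the host-order c_rsvd0_ver word, C is bit 15, K bit 13,
 * S bit 12 and the version occupies the low 3 bits.
 */
static inline void
example_gre_flags(uint16_t c_rsvd0_ver, int *c, int *k, int *s, int *ver)
{
	*c = (c_rsvd0_ver >> 15) & 1;
	*k = (c_rsvd0_ver >> 13) & 1;
	*s = (c_rsvd0_ver >> 12) & 1;
	*ver = c_rsvd0_ver & 0x7;
}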
8824
8825 /**
8826  * Add NVGRE item to matcher and to the value.
8827  *
8828  * @param[in, out] matcher
8829  *   Flow matcher.
8830  * @param[in, out] key
8831  *   Flow matcher value.
8832  * @param[in] item
8833  *   Flow pattern to translate.
8834  * @param[in] pattern_flags
8835  *   Accumulated pattern flags.
8836  */
8837 static void
8838 flow_dv_translate_item_nvgre(void *matcher, void *key,
8839                              const struct rte_flow_item *item,
8840                              uint64_t pattern_flags)
8841 {
8842         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8843         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8844         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8845         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8846         const char *tni_flow_id_m;
8847         const char *tni_flow_id_v;
8848         char *gre_key_m;
8849         char *gre_key_v;
8850         int size;
8851         int i;
8852
8853         /* For NVGRE, GRE header fields must be set with defined values. */
8854         const struct rte_flow_item_gre gre_spec = {
8855                 .c_rsvd0_ver = RTE_BE16(0x2000),
8856                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8857         };
8858         const struct rte_flow_item_gre gre_mask = {
8859                 .c_rsvd0_ver = RTE_BE16(0xB000),
8860                 .protocol = RTE_BE16(UINT16_MAX),
8861         };
8862         const struct rte_flow_item gre_item = {
8863                 .spec = &gre_spec,
8864                 .mask = &gre_mask,
8865                 .last = NULL,
8866         };
8867         flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags);
8868         if (!nvgre_v)
8869                 return;
8870         if (!nvgre_m)
8871                 nvgre_m = &rte_flow_item_nvgre_mask;
8872         tni_flow_id_m = (const char *)nvgre_m->tni;
8873         tni_flow_id_v = (const char *)nvgre_v->tni;
8874         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8875         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8876         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8877         memcpy(gre_key_m, tni_flow_id_m, size);
8878         for (i = 0; i < size; ++i)
8879                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8880 }
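
/*
 * Why the GRE constants above are 0x2000/0xB000 (illustrative helper,
 * not driver code): NVGRE requires the K bit (bit 13) set with C (bit 15)
 * and S (bit 12) clear, so the spec is 0x2000 under the mask 0xB000.
 */
static inline int
example_is_nvgre_hdr(uint16_t c_rsvd0_ver)
{
	return (c_rsvd0_ver & 0xB000) == 0x2000; /* K=1, C=0, S=0 */
}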
8881
8882 /**
8883  * Add VXLAN item to matcher and to the value.
8884  *
8885  * @param[in] dev
8886  *   Pointer to the Ethernet device structure.
8887  * @param[in] attr
8888  *   Flow rule attributes.
8889  * @param[in, out] matcher
8890  *   Flow matcher.
8891  * @param[in, out] key
8892  *   Flow matcher value.
8893  * @param[in] item
8894  *   Flow pattern to translate.
8895  * @param[in] inner
8896  *   Item is inner pattern.
8897  */
8898 static void
8899 flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
8900                              const struct rte_flow_attr *attr,
8901                              void *matcher, void *key,
8902                              const struct rte_flow_item *item,
8903                              int inner)
8904 {
8905         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8906         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8907         void *headers_m;
8908         void *headers_v;
8909         void *misc5_m;
8910         void *misc5_v;
8911         uint32_t *tunnel_header_v;
8912         uint32_t *tunnel_header_m;
8913         uint16_t dport;
8914         struct mlx5_priv *priv = dev->data->dev_private;
8915         const struct rte_flow_item_vxlan nic_mask = {
8916                 .vni = "\xff\xff\xff",
8917                 .rsvd1 = 0xff,
8918         };
8919
8920         if (inner) {
8921                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8922                                          inner_headers);
8923                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8924         } else {
8925                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8926                                          outer_headers);
8927                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8928         }
8929         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8930                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8931         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8932                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8933                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8934         }
8935         dport = MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport);
8936         if (!vxlan_v)
8937                 return;
8938         if (!vxlan_m) {
8939                 if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
8940                     (attr->group && !priv->sh->misc5_cap))
8941                         vxlan_m = &rte_flow_item_vxlan_mask;
8942                 else
8943                         vxlan_m = &nic_mask;
8944         }
8945         if ((priv->sh->steering_format_version ==
8946             MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
8947             dport != MLX5_UDP_PORT_VXLAN) ||
8948             (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
8949             ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
8950                 void *misc_m;
8951                 void *misc_v;
8952                 char *vni_m;
8953                 char *vni_v;
8954                 int size;
8955                 int i;
8956                 misc_m = MLX5_ADDR_OF(fte_match_param,
8957                                       matcher, misc_parameters);
8958                 misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8959                 size = sizeof(vxlan_m->vni);
8960                 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8961                 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8962                 memcpy(vni_m, vxlan_m->vni, size);
8963                 for (i = 0; i < size; ++i)
8964                         vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8965                 return;
8966         }
8967         misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
8968         misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
8969         tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8970                                                    misc5_v,
8971                                                    tunnel_header_1);
8972         tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8973                                                    misc5_m,
8974                                                    tunnel_header_1);
8975         *tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
8976                            (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
8977                            (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
8978         if (*tunnel_header_v)
8979                 *tunnel_header_m = vxlan_m->vni[0] |
8980                         vxlan_m->vni[1] << 8 |
8981                         vxlan_m->vni[2] << 16;
8982         else
8983                 *tunnel_header_m = 0x0;
8984         *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
8985         if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
8986                 *tunnel_header_m |= vxlan_m->rsvd1 << 24;
8987 }
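
/*
 * The tunnel_header_1 packing used above, restated as a sketch
 * (hypothetical helper). The three VNI bytes land in the low 24 bits,
 * least significant byte first, and rsvd1 occupies the top byte.
 */
static inline uint32_t
example_vxlan_tunnel_header(const uint8_t vni[3], uint8_t rsvd1)
{
	return (uint32_t)vni[0] | (uint32_t)vni[1] << 8 |
	       (uint32_t)vni[2] << 16 | (uint32_t)rsvd1 << 24;
}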
8988
8989 /**
8990  * Add VXLAN-GPE item to matcher and to the value.
8991  *
8992  * @param[in, out] matcher
8993  *   Flow matcher.
8994  * @param[in, out] key
8995  *   Flow matcher value.
8996  * @param[in] item
8997  *   Flow pattern to translate.
8998  * @param[in] pattern_flags
8999  *   Accumulated pattern flags.
9000  */
9001
9002 static void
9003 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
9004                                  const struct rte_flow_item *item,
9005                                  const uint64_t pattern_flags)
9006 {
9007         static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
9008         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
9009         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
9010         /* The item was validated to be on the outer side */
9011         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9012         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9013         void *misc_m =
9014                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
9015         void *misc_v =
9016                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9017         char *vni_m =
9018                 MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
9019         char *vni_v =
9020                 MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
9021         int i, size = sizeof(vxlan_m->vni);
9022         uint8_t flags_m = 0xff;
9023         uint8_t flags_v = 0xc;
9024         uint8_t m_protocol, v_protocol;
9025
9026         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9027                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9028                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9029                          MLX5_UDP_PORT_VXLAN_GPE);
9030         }
9031         if (!vxlan_v) {
9032                 vxlan_v = &dummy_vxlan_gpe_hdr;
9033                 vxlan_m = &dummy_vxlan_gpe_hdr;
9034         } else {
9035                 if (!vxlan_m)
9036                         vxlan_m = &rte_flow_item_vxlan_gpe_mask;
9037         }
9038         memcpy(vni_m, vxlan_m->vni, size);
9039         for (i = 0; i < size; ++i)
9040                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
9041         if (vxlan_m->flags) {
9042                 flags_m = vxlan_m->flags;
9043                 flags_v = vxlan_v->flags;
9044         }
9045         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
9046         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
9047         m_protocol = vxlan_m->protocol;
9048         v_protocol = vxlan_v->protocol;
9049         if (!m_protocol) {
9050                 /* Force next protocol to ensure next-header parsing. */
9051                 if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
9052                         v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
9053                 else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
9054                         v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
9055                 else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
9056                         v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
9057                 if (v_protocol)
9058                         m_protocol = 0xFF;
9059         }
9060         MLX5_SET(fte_match_set_misc3, misc_m,
9061                  outer_vxlan_gpe_next_protocol, m_protocol);
9062         MLX5_SET(fte_match_set_misc3, misc_v,
9063                  outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
9064 }
9065
9066 /**
9067  * Add Geneve item to matcher and to the value.
9068  *
9069  * @param[in, out] matcher
9070  *   Flow matcher.
9071  * @param[in, out] key
9072  *   Flow matcher value.
9073  * @param[in] item
9074  *   Flow pattern to translate.
9075  * @param[in] pattern_flags
9076  *   Accumulated pattern flags.
9077  */
9078
9079 static void
9080 flow_dv_translate_item_geneve(void *matcher, void *key,
9081                               const struct rte_flow_item *item,
9082                               uint64_t pattern_flags)
9083 {
9084         static const struct rte_flow_item_geneve empty_geneve = {0,};
9085         const struct rte_flow_item_geneve *geneve_m = item->mask;
9086         const struct rte_flow_item_geneve *geneve_v = item->spec;
9087         /* GENEVE flow item validation allows a single tunnel item. */
9088         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9089         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9090         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9091         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9092         uint16_t gbhdr_m;
9093         uint16_t gbhdr_v;
9094         char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
9095         char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
9096         size_t size = sizeof(geneve_m->vni), i;
9097         uint16_t protocol_m, protocol_v;
9098
9099         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9100                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9101                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9102                          MLX5_UDP_PORT_GENEVE);
9103         }
9104         if (!geneve_v) {
9105                 geneve_v = &empty_geneve;
9106                 geneve_m = &empty_geneve;
9107         } else {
9108                 if (!geneve_m)
9109                         geneve_m = &rte_flow_item_geneve_mask;
9110         }
9111         memcpy(vni_m, geneve_m->vni, size);
9112         for (i = 0; i < size; ++i)
9113                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
9114         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
9115         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
9116         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
9117                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9118         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
9119                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9120         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9121                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9122         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9123                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
9124                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9125         protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
9126         protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
9127         if (!protocol_m) {
9128                 /* Force next protocol to prevent matcher duplication. */
9129                 protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
9130                 if (protocol_v)
9131                         protocol_m = 0xFFFF;
9132         }
9133         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
9134         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
9135                  protocol_m & protocol_v);
9136 }
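
/*
 * A sketch (hypothetical helper) of the GENEVE first-word layout assumed
 * by the MLX5_GENEVE_OAMF_VAL()/MLX5_GENEVE_OPTLEN_VAL() accessors above:
 * ver(2) | opt_len(6) | O(1) | C(1) | rsvd(6), host order.
 */
static inline void
example_geneve_word0(uint16_t w, uint8_t *opt_len, int *oam)
{
	*opt_len = (w >> 8) & 0x3f; /* options length, 4-byte words */
	*oam = (w >> 7) & 1;        /* O (OAM) flag */
}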
9137
9138 /**
9139  * Create Geneve TLV option resource.
9140  *
9141  * @param[in, out] dev
9142  *   Pointer to rte_eth_dev structure.
9143  * @param[in] item
9144  *   Flow pattern to translate (the GENEVE TLV option item).
9145  * @param[out] error
9146  *   Pointer to error structure.
9149  *
9150  * @return
9151  *   0 on success, a negative value otherwise.
9152  */
9153
9154 int
9155 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
9156                                              const struct rte_flow_item *item,
9157                                              struct rte_flow_error *error)
9158 {
9159         struct mlx5_priv *priv = dev->data->dev_private;
9160         struct mlx5_dev_ctx_shared *sh = priv->sh;
9161         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
9162                         sh->geneve_tlv_option_resource;
9163         struct mlx5_devx_obj *obj;
9164         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9165         int ret = 0;
9166
9167         if (!geneve_opt_v)
9168                 return -1;
9169         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
9170         if (geneve_opt_resource != NULL) {
9171                 if (geneve_opt_resource->option_class ==
9172                         geneve_opt_v->option_class &&
9173                         geneve_opt_resource->option_type ==
9174                         geneve_opt_v->option_type &&
9175                         geneve_opt_resource->length ==
9176                         geneve_opt_v->option_len) {
9177                         /* We already have GENEVE TLV option obj allocated. */
9178                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
9179                                            __ATOMIC_RELAXED);
9180                 } else {
9181                         ret = rte_flow_error_set(error, ENOMEM,
9182                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9183                                 "Only one GENEVE TLV option supported");
9184                         goto exit;
9185                 }
9186         } else {
9187                 /* Create a GENEVE TLV object and resource. */
9188                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->cdev->ctx,
9189                                 geneve_opt_v->option_class,
9190                                 geneve_opt_v->option_type,
9191                                 geneve_opt_v->option_len);
9192                 if (!obj) {
9193                         ret = rte_flow_error_set(error, ENODATA,
9194                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9195                                 "Failed to create GENEVE TLV Devx object");
9196                         goto exit;
9197                 }
9198                 sh->geneve_tlv_option_resource =
9199                                 mlx5_malloc(MLX5_MEM_ZERO,
9200                                                 sizeof(*geneve_opt_resource),
9201                                                 0, SOCKET_ID_ANY);
9202                 if (!sh->geneve_tlv_option_resource) {
9203                         claim_zero(mlx5_devx_cmd_destroy(obj));
9204                         ret = rte_flow_error_set(error, ENOMEM,
9205                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9206                                 "GENEVE TLV object memory allocation failed");
9207                         goto exit;
9208                 }
9209                 geneve_opt_resource = sh->geneve_tlv_option_resource;
9210                 geneve_opt_resource->obj = obj;
9211                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
9212                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
9213                 geneve_opt_resource->length = geneve_opt_v->option_len;
9214                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
9215                                 __ATOMIC_RELAXED);
9216         }
9217 exit:
9218         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
9219         return ret;
9220 }
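
/*
 * A hypothetical sketch of the matching release path for the singleton
 * allocated above: drop one reference and destroy the DevX object when
 * the count reaches zero; the real driver would do this under the same
 * geneve_tlv_opt_sl spinlock.
 */
static inline void
example_geneve_tlv_opt_release(struct mlx5_geneve_tlv_option_resource *res)
{
	if (__atomic_sub_fetch(&res->refcnt, 1, __ATOMIC_RELAXED) == 0)
		claim_zero(mlx5_devx_cmd_destroy(res->obj));
	/* The owner would also clear sh->geneve_tlv_option_resource. */
}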
9221
9222 /**
9223  * Add Geneve TLV option item to matcher.
9224  *
9225  * @param[in, out] dev
9226  *   Pointer to rte_eth_dev structure.
9227  * @param[in, out] matcher
9228  *   Flow matcher.
9229  * @param[in, out] key
9230  *   Flow matcher value.
9231  * @param[in] item
9232  *   Flow pattern to translate.
9233  * @param[out] error
9234  *   Pointer to error structure.
9235  *
 * @return
 *   0 on success, a negative value otherwise.
 */
9236 static int
9237 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9238                                   void *key, const struct rte_flow_item *item,
9239                                   struct rte_flow_error *error)
9240 {
9241         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9242         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9243         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9244         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9245         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9246                         misc_parameters_3);
9247         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9248         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9249         int ret = 0;
9250
9251         if (!geneve_opt_v)
9252                 return -1;
9253         if (!geneve_opt_m)
9254                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9255         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9256                                                            error);
9257         if (ret) {
9258                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9259                 return ret;
9260         }
9261         /*
9262          * Set the option length in the GENEVE header if not requested.
9263          * The GENEVE TLV option length is expressed by the option length
9264          * field in the GENEVE header.
9265          * If the option length was not requested but the GENEVE TLV option
9266          * item is present, set the option length field implicitly.
9267          */
9268         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9269                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9270                          MLX5_GENEVE_OPTLEN_MASK);
9271                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9272                          geneve_opt_v->option_len + 1);
9273         }
9274         MLX5_SET(fte_match_set_misc, misc_m, geneve_tlv_option_0_exist, 1);
9275         MLX5_SET(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist, 1);
9276         /* Set the data. */
9277         if (geneve_opt_v->data) {
9278                 memcpy(&opt_data_key, geneve_opt_v->data,
9279                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9280                                 sizeof(opt_data_key)));
9281                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9282                                 sizeof(opt_data_key));
9283                 memcpy(&opt_data_mask, geneve_opt_m->data,
9284                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9285                                 sizeof(opt_data_mask)));
9286                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9287                                 sizeof(opt_data_mask));
9288                 MLX5_SET(fte_match_set_misc3, misc3_m,
9289                                 geneve_tlv_option_0_data,
9290                                 rte_be_to_cpu_32(opt_data_mask));
9291                 MLX5_SET(fte_match_set_misc3, misc3_v,
9292                                 geneve_tlv_option_0_data,
9293                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9294         }
9295         return ret;
9296 }
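
/*
 * Illustrative sketch (not part of the driver): an application-side
 * GENEVE TLV option item that the translator above consumes. The option
 * length is counted in 4-byte words and only the first data word is
 * used for matching (see the RTE_MIN()/MLX5_ASSERT() above). All field
 * values here are hypothetical.
 *
 *   uint32_t opt_data[1] = { RTE_BE32(0xdeadbeef) };
 *   struct rte_flow_item_geneve_opt opt_spec = {
 *           .option_class = RTE_BE16(0x0102),
 *           .option_type = 0x42,
 *           .option_len = 1,
 *           .data = opt_data,
 *   };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_GENEVE_OPT,
 *           .spec = &opt_spec,
 *           .mask = &rte_flow_item_geneve_opt_mask,
 *   };
 */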
9297
9298 /**
9299  * Add MPLS item to matcher and to the value.
9300  *
9301  * @param[in, out] matcher
9302  *   Flow matcher.
9303  * @param[in, out] key
9304  *   Flow matcher value.
9305  * @param[in] item
9306  *   Flow pattern to translate.
9307  * @param[in] prev_layer
9308  *   The protocol layer indicated in previous item.
9309  * @param[in] inner
9310  *   Item is inner pattern.
9311  */
9312 static void
9313 flow_dv_translate_item_mpls(void *matcher, void *key,
9314                             const struct rte_flow_item *item,
9315                             uint64_t prev_layer,
9316                             int inner)
9317 {
9318         const uint32_t *in_mpls_m = item->mask;
9319         const uint32_t *in_mpls_v = item->spec;
9320         uint32_t *out_mpls_m = 0;
9321         uint32_t *out_mpls_v = 0;
9322         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9323         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9324         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9325                                      misc_parameters_2);
9326         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9327         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9328         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9329
9330         switch (prev_layer) {
9331         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9332                 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9333                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
9334                                  0xffff);
9335                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9336                                  MLX5_UDP_PORT_MPLS);
9337                 }
9338                 break;
9339         case MLX5_FLOW_LAYER_GRE:
9340                 /* Fall-through. */
9341         case MLX5_FLOW_LAYER_GRE_KEY:
9342                 if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
9343                         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
9344                                  0xffff);
9345                         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9346                                  RTE_ETHER_TYPE_MPLS);
9347                 }
9348                 break;
9349         default:
9350                 break;
9351         }
9352         if (!in_mpls_v)
9353                 return;
9354         if (!in_mpls_m)
9355                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9356         switch (prev_layer) {
9357         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9358                 out_mpls_m =
9359                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9360                                                  outer_first_mpls_over_udp);
9361                 out_mpls_v =
9362                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9363                                                  outer_first_mpls_over_udp);
9364                 break;
9365         case MLX5_FLOW_LAYER_GRE:
9366                 out_mpls_m =
9367                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9368                                                  outer_first_mpls_over_gre);
9369                 out_mpls_v =
9370                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9371                                                  outer_first_mpls_over_gre);
9372                 break;
9373         default:
9374                 /* Inner MPLS not over GRE is not supported. */
9375                 if (!inner) {
9376                         out_mpls_m =
9377                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9378                                                          misc2_m,
9379                                                          outer_first_mpls);
9380                         out_mpls_v =
9381                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9382                                                          misc2_v,
9383                                                          outer_first_mpls);
9384                 }
9385                 break;
9386         }
9387         if (out_mpls_m && out_mpls_v) {
9388                 *out_mpls_m = *in_mpls_m;
9389                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9390         }
9391 }
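
/*
 * Illustrative sketch (not part of the driver): an application-side
 * pattern for MPLS over UDP handled by the translator above. When the
 * UDP item leaves the destination port unspecified, the translator pins
 * it to MLX5_UDP_PORT_MPLS implicitly. The label value is hypothetical:
 * bytes {0x00, 0x01, 0x41} encode label 20, TC 0, S 1.
 *
 *   struct rte_flow_item_mpls mpls_spec = {
 *           .label_tc_s = { 0x00, 0x01, 0x41 },
 *   };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_MPLS,
 *             .spec = &mpls_spec,
 *             .mask = &rte_flow_item_mpls_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */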
9392
9393 /**
9394  * Add metadata register item to matcher
9395  *
9396  * @param[in, out] matcher
9397  *   Flow matcher.
9398  * @param[in, out] key
9399  *   Flow matcher value.
9400  * @param[in] reg_type
9401  *   Type of device metadata register.
9402  * @param[in] data
9403  *   Register data to match.
9404  * @param[in] mask
9405  *   Register mask.
9406  */
9407 static void
9408 flow_dv_match_meta_reg(void *matcher, void *key,
9409                        enum modify_reg reg_type,
9410                        uint32_t data, uint32_t mask)
9411 {
9412         void *misc2_m =
9413                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9414         void *misc2_v =
9415                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9416         uint32_t temp;
9417
9418         data &= mask;
9419         switch (reg_type) {
9420         case REG_A:
9421                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9422                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9423                 break;
9424         case REG_B:
9425                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9426                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9427                 break;
9428         case REG_C_0:
9429                 /*
9430                  * The metadata register C0 field might be divided into
9431                  * source vport index and META item value, we should set
9432                  * this field according to specified mask, not as whole one.
9433                  */
9434                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9435                 temp |= mask;
9436                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9437                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9438                 temp &= ~mask;
9439                 temp |= data;
9440                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9441                 break;
9442         case REG_C_1:
9443                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9444                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9445                 break;
9446         case REG_C_2:
9447                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9448                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9449                 break;
9450         case REG_C_3:
9451                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9452                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9453                 break;
9454         case REG_C_4:
9455                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9456                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9457                 break;
9458         case REG_C_5:
9459                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9460                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9461                 break;
9462         case REG_C_6:
9463                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9464                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9465                 break;
9466         case REG_C_7:
9467                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9468                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9469                 break;
9470         default:
9471                 MLX5_ASSERT(false);
9472                 break;
9473         }
9474 }
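
/*
 * Worked example for the REG_C_0 branch above, with hypothetical values:
 * assume the matcher mask already holds 0xffff0000 (vport metadata bits)
 * and this call adds data 0x0005 under mask 0x0000ffff. The merged mask
 * becomes 0xffffffff, while the value keeps its upper half and only the
 * newly masked lower half is rewritten:
 *
 *   temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
 *   temp &= ~0x0000ffff;   keep the vport metadata bits
 *   temp |= 0x0005;        merge in the new data
 */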
9475
9476 /**
9477  * Add MARK item to matcher
9478  *
9479  * @param[in] dev
9480  *   The device to configure through.
9481  * @param[in, out] matcher
9482  *   Flow matcher.
9483  * @param[in, out] key
9484  *   Flow matcher value.
9485  * @param[in] item
9486  *   Flow pattern to translate.
9487  */
9488 static void
9489 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9490                             void *matcher, void *key,
9491                             const struct rte_flow_item *item)
9492 {
9493         struct mlx5_priv *priv = dev->data->dev_private;
9494         const struct rte_flow_item_mark *mark;
9495         uint32_t value;
9496         uint32_t mask;
9497
9498         mark = item->mask ? (const void *)item->mask :
9499                             &rte_flow_item_mark_mask;
9500         mask = mark->id & priv->sh->dv_mark_mask;
9501         mark = (const void *)item->spec;
9502         MLX5_ASSERT(mark);
9503         value = mark->id & priv->sh->dv_mark_mask & mask;
9504         if (mask) {
9505                 enum modify_reg reg;
9506
9507                 /* Get the metadata register index for the mark. */
9508                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9509                 MLX5_ASSERT(reg > 0);
9510                 if (reg == REG_C_0) {
9511                         struct mlx5_priv *priv = dev->data->dev_private;
9512                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9513                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9514
9515                         mask &= msk_c0;
9516                         mask <<= shl_c0;
9517                         value <<= shl_c0;
9518                 }
9519                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9520         }
9521 }
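
/*
 * Numeric sketch of the shift above, assuming a hypothetical layout
 * where the bits allocated for MARK sit in the upper half of REG_C_0:
 *
 *   uint32_t msk_c0 = 0xffff0000;          dv_regc0_mask (assumed)
 *   uint32_t shl_c0 = rte_bsf32(msk_c0);   lowest set bit, i.e. 16
 *
 * so a MARK id of 0x5 is shifted to 0x00050000 before being written
 * into the C0 match value.
 */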
9522
9523 /**
9524  * Add META item to matcher
9525  *
9526  * @param[in] dev
9527  *   The device to configure through.
9528  * @param[in, out] matcher
9529  *   Flow matcher.
9530  * @param[in, out] key
9531  *   Flow matcher value.
9532  * @param[in] attr
9533  *   Attributes of flow that includes this item.
9534  * @param[in] item
9535  *   Flow pattern to translate.
9536  */
9537 static void
9538 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9539                             void *matcher, void *key,
9540                             const struct rte_flow_attr *attr,
9541                             const struct rte_flow_item *item)
9542 {
9543         const struct rte_flow_item_meta *meta_m;
9544         const struct rte_flow_item_meta *meta_v;
9545
9546         meta_m = (const void *)item->mask;
9547         if (!meta_m)
9548                 meta_m = &rte_flow_item_meta_mask;
9549         meta_v = (const void *)item->spec;
9550         if (meta_v) {
9551                 int reg;
9552                 uint32_t value = meta_v->data;
9553                 uint32_t mask = meta_m->data;
9554
9555                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9556                 if (reg < 0)
9557                         return;
9558                 MLX5_ASSERT(reg != REG_NON);
9559                 if (reg == REG_C_0) {
9560                         struct mlx5_priv *priv = dev->data->dev_private;
9561                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9562                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9563
9564                         mask &= msk_c0;
9565                         mask <<= shl_c0;
9566                         value <<= shl_c0;
9567                 }
9568                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9569         }
9570 }
9571
9572 /**
9573  * Add vport metadata Reg C0 item to matcher
9574  *
9575  * @param[in, out] matcher
9576  *   Flow matcher.
9577  * @param[in, out] key
9578  *   Flow matcher value.
9579  * @param[in] value
9580  *   REG_C_0 value to match; @p mask provides the register mask.
9581  */
9582 static void
9583 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9584                                   uint32_t value, uint32_t mask)
9585 {
9586         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9587 }
9588
9589 /**
9590  * Add tag item to matcher
9591  *
9592  * @param[in] dev
9593  *   The device to configure through.
9594  * @param[in, out] matcher
9595  *   Flow matcher.
9596  * @param[in, out] key
9597  *   Flow matcher value.
9598  * @param[in] item
9599  *   Flow pattern to translate.
9600  */
9601 static void
9602 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9603                                 void *matcher, void *key,
9604                                 const struct rte_flow_item *item)
9605 {
9606         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9607         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9608         uint32_t mask, value;
9609
9610         MLX5_ASSERT(tag_v);
9611         value = tag_v->data;
9612         mask = tag_m ? tag_m->data : UINT32_MAX;
9613         if (tag_v->id == REG_C_0) {
9614                 struct mlx5_priv *priv = dev->data->dev_private;
9615                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9616                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9617
9618                 mask &= msk_c0;
9619                 mask <<= shl_c0;
9620                 value <<= shl_c0;
9621         }
9622         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9623 }
9624
9625 /**
9626  * Add TAG item to matcher
9627  *
9628  * @param[in] dev
9629  *   The device to configure through.
9630  * @param[in, out] matcher
9631  *   Flow matcher.
9632  * @param[in, out] key
9633  *   Flow matcher value.
9634  * @param[in] item
9635  *   Flow pattern to translate.
9636  */
9637 static void
9638 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9639                            void *matcher, void *key,
9640                            const struct rte_flow_item *item)
9641 {
9642         const struct rte_flow_item_tag *tag_v = item->spec;
9643         const struct rte_flow_item_tag *tag_m = item->mask;
9644         enum modify_reg reg;
9645
9646         MLX5_ASSERT(tag_v);
9647         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9648         /* Get the metadata register index for the tag. */
9649         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9650         MLX5_ASSERT(reg > 0);
9651         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9652 }
9653
9654 /**
9655  * Add source vport match to the specified matcher.
9656  *
9657  * @param[in, out] matcher
9658  *   Flow matcher.
9659  * @param[in, out] key
9660  *   Flow matcher value.
9661  * @param[in] port
9662  *   Source vport value to match
9663  * @param[in] mask
9664  *   Mask
9665  */
9666 static void
9667 flow_dv_translate_item_source_vport(void *matcher, void *key,
9668                                     int16_t port, uint16_t mask)
9669 {
9670         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9671         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9672
9673         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9674         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9675 }
9676
9677 /**
9678  * Translate port-id item to E-Switch match on port-id.
9679  *
9680  * @param[in] dev
9681  *   The device to configure through.
9682  * @param[in, out] matcher
9683  *   Flow matcher.
9684  * @param[in, out] key
9685  *   Flow matcher value.
9686  * @param[in] item
9687  *   Flow pattern to translate.
9688  * @param[in] attr
9689  *   Flow attributes.
9690  *
9691  * @return
9692  *   0 on success, a negative errno value otherwise.
9693  */
9694 static int
9695 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9696                                void *key, const struct rte_flow_item *item,
9697                                const struct rte_flow_attr *attr)
9698 {
9699         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9700         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9701         struct mlx5_priv *priv;
9702         uint16_t mask, id;
9703
9704         if (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) {
9705                 flow_dv_translate_item_source_vport(matcher, key,
9706                         flow_dv_get_esw_manager_vport_id(dev), 0xffff);
9707                 return 0;
9708         }
9709         mask = pid_m ? pid_m->id : 0xffff;
9710         id = pid_v ? pid_v->id : dev->data->port_id;
9711         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9712         if (!priv)
9713                 return -rte_errno;
9714         /*
9715          * Translate to vport field or to metadata, depending on mode.
9716          * Kernel can use either misc.source_port or half of C0 metadata
9717          * register.
9718          */
9719         if (priv->vport_meta_mask) {
9720                 /*
9721                  * Provide the hint for SW steering library
9722                  * to insert the flow into ingress domain and
9723                  * save the extra vport match.
9724                  */
9725                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9726                     priv->pf_bond < 0 && attr->transfer)
9727                         flow_dv_translate_item_source_vport
9728                                 (matcher, key, priv->vport_id, mask);
9729                 /*
9730                  * We should always set the vport metadata register,
9731                  * otherwise the SW steering library can drop
9732                  * the rule if wire vport metadata value is not zero,
9733                  * it depends on kernel configuration.
9734                  */
9735                 flow_dv_translate_item_meta_vport(matcher, key,
9736                                                   priv->vport_meta_tag,
9737                                                   priv->vport_meta_mask);
9738         } else {
9739                 flow_dv_translate_item_source_vport(matcher, key,
9740                                                     priv->vport_id, mask);
9741         }
9742         return 0;
9743 }
9744
9745 /**
9746  * Add ICMP6 item to matcher and to the value.
9747  *
9748  * @param[in, out] matcher
9749  *   Flow matcher.
9750  * @param[in, out] key
9751  *   Flow matcher value.
9752  * @param[in] item
9753  *   Flow pattern to translate.
9754  * @param[in] inner
9755  *   Item is inner pattern.
9756  */
9757 static void
9758 flow_dv_translate_item_icmp6(void *matcher, void *key,
9759                               const struct rte_flow_item *item,
9760                               int inner)
9761 {
9762         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9763         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9764         void *headers_m;
9765         void *headers_v;
9766         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9767                                      misc_parameters_3);
9768         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9769         if (inner) {
9770                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9771                                          inner_headers);
9772                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9773         } else {
9774                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9775                                          outer_headers);
9776                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9777         }
9778         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9779         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9780         if (!icmp6_v)
9781                 return;
9782         if (!icmp6_m)
9783                 icmp6_m = &rte_flow_item_icmp6_mask;
9784         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9785         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9786                  icmp6_v->type & icmp6_m->type);
9787         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9788         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9789                  icmp6_v->code & icmp6_m->code);
9790 }
9791
9792 /**
9793  * Add ICMP item to matcher and to the value.
9794  *
9795  * @param[in, out] matcher
9796  *   Flow matcher.
9797  * @param[in, out] key
9798  *   Flow matcher value.
9799  * @param[in] item
9800  *   Flow pattern to translate.
9801  * @param[in] inner
9802  *   Item is inner pattern.
9803  */
9804 static void
9805 flow_dv_translate_item_icmp(void *matcher, void *key,
9806                             const struct rte_flow_item *item,
9807                             int inner)
9808 {
9809         const struct rte_flow_item_icmp *icmp_m = item->mask;
9810         const struct rte_flow_item_icmp *icmp_v = item->spec;
9811         uint32_t icmp_header_data_m = 0;
9812         uint32_t icmp_header_data_v = 0;
9813         void *headers_m;
9814         void *headers_v;
9815         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9816                                      misc_parameters_3);
9817         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9818         if (inner) {
9819                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9820                                          inner_headers);
9821                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9822         } else {
9823                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9824                                          outer_headers);
9825                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9826         }
9827         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9828         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9829         if (!icmp_v)
9830                 return;
9831         if (!icmp_m)
9832                 icmp_m = &rte_flow_item_icmp_mask;
9833         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9834                  icmp_m->hdr.icmp_type);
9835         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9836                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9837         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9838                  icmp_m->hdr.icmp_code);
9839         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9840                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9841         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9842         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9843         if (icmp_header_data_m) {
9844                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9845                 icmp_header_data_v |=
9846                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9847                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9848                          icmp_header_data_m);
9849                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9850                          icmp_header_data_v & icmp_header_data_m);
9851         }
9852 }
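
/*
 * Layout note with hypothetical values: the 32-bit icmp_header_data
 * field packs the ICMP identifier into the upper half and the sequence
 * number into the lower half, both converted to CPU order first:
 *
 *   ident = 0x1234, seq_nb = 0x0001
 *   icmp_header_data = (0x1234 << 16) | 0x0001 = 0x12340001
 *
 * Matching on these fields is skipped entirely when both mask halves
 * are zero.
 */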
9853
9854 /**
9855  * Add GTP item to matcher and to the value.
9856  *
9857  * @param[in, out] matcher
9858  *   Flow matcher.
9859  * @param[in, out] key
9860  *   Flow matcher value.
9861  * @param[in] item
9862  *   Flow pattern to translate.
9863  * @param[in] inner
9864  *   Item is inner pattern.
9865  */
9866 static void
9867 flow_dv_translate_item_gtp(void *matcher, void *key,
9868                            const struct rte_flow_item *item, int inner)
9869 {
9870         const struct rte_flow_item_gtp *gtp_m = item->mask;
9871         const struct rte_flow_item_gtp *gtp_v = item->spec;
9872         void *headers_m;
9873         void *headers_v;
9874         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9875                                      misc_parameters_3);
9876         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9877         uint16_t dport = RTE_GTPU_UDP_PORT;
9878
9879         if (inner) {
9880                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9881                                          inner_headers);
9882                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9883         } else {
9884                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9885                                          outer_headers);
9886                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9887         }
9888         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9889                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9890                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9891         }
9892         if (!gtp_v)
9893                 return;
9894         if (!gtp_m)
9895                 gtp_m = &rte_flow_item_gtp_mask;
9896         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9897                  gtp_m->v_pt_rsv_flags);
9898         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9899                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9900         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9901         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9902                  gtp_v->msg_type & gtp_m->msg_type);
9903         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9904                  rte_be_to_cpu_32(gtp_m->teid));
9905         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9906                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9907 }
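
/*
 * Illustrative sketch (not part of the driver): a GTP item matching
 * TEID 0x1234 (the value is hypothetical). The item carries the TEID in
 * network order and the translator above converts it back to CPU order
 * for MLX5_SET(). The UDP destination port defaults to RTE_GTPU_UDP_PORT
 * when the preceding UDP item leaves it unset.
 *
 *   struct rte_flow_item_gtp gtp_spec = {
 *           .teid = RTE_BE32(0x1234),
 *   };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_GTP,
 *           .spec = &gtp_spec,
 *           .mask = &rte_flow_item_gtp_mask,
 *   };
 */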
9908
9909 /**
9910  * Add GTP PSC item to matcher.
9911  *
9912  * @param[in, out] matcher
9913  *   Flow matcher.
9914  * @param[in, out] key
9915  *   Flow matcher value.
9916  * @param[in] item
9917  *   Flow pattern to translate.
9918  */
9919 static int
9920 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9921                                const struct rte_flow_item *item)
9922 {
9923         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9924         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9925         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9926                         misc_parameters_3);
9927         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9928         union {
9929                 uint32_t w32;
9930                 struct {
9931                         uint16_t seq_num;
9932                         uint8_t npdu_num;
9933                         uint8_t next_ext_header_type;
9934                 };
9935         } dw_2;
9936         uint8_t gtp_flags;
9937
9938         /* Always set E-flag match on one, regardless of GTP item settings. */
9939         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9940         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9941         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9942         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9943         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9944         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9945         /* Set next extension header type. */
9946         dw_2.seq_num = 0;
9947         dw_2.npdu_num = 0;
9948         dw_2.next_ext_header_type = 0xff;
9949         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9950                  rte_cpu_to_be_32(dw_2.w32));
9951         dw_2.seq_num = 0;
9952         dw_2.npdu_num = 0;
9953         dw_2.next_ext_header_type = 0x85;
9954         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9955                  rte_cpu_to_be_32(dw_2.w32));
9956         if (gtp_psc_v) {
9957                 union {
9958                         uint32_t w32;
9959                         struct {
9960                                 uint8_t len;
9961                                 uint8_t type_flags;
9962                                 uint8_t qfi;
9963                                 uint8_t reserved;
9964                         };
9965                 } dw_0;
9966
9967                 /* Set extension header PDU type and QoS. */
9968                 if (!gtp_psc_m)
9969                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9970                 dw_0.w32 = 0;
9971                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->hdr.type);
9972                 dw_0.qfi = gtp_psc_m->hdr.qfi;
9973                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9974                          rte_cpu_to_be_32(dw_0.w32));
9975                 dw_0.w32 = 0;
9976                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->hdr.type &
9977                                                         gtp_psc_m->hdr.type);
9978                 dw_0.qfi = gtp_psc_v->hdr.qfi & gtp_psc_m->hdr.qfi;
9979                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9980                          rte_cpu_to_be_32(dw_0.w32));
9981         }
9982         return 0;
9983 }
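
/*
 * A note on the constants above: 0x85 is the GTP-U next-extension-header
 * type of the PDU Session Container (3GPP TS 29.281), so the value side
 * pins the first extension header to PSC while the 0xff mask makes the
 * type byte fully significant. An application-side item could look like
 * this (PDU type and QFI values are hypothetical):
 *
 *   struct rte_flow_item_gtp_psc psc_spec = {
 *           .hdr = { .type = 1, .qfi = 9 },
 *   };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_GTP_PSC,
 *           .spec = &psc_spec,
 *           .mask = &rte_flow_item_gtp_psc_mask,
 *   };
 */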
9984
9985 /**
9986  * Add eCPRI item to matcher and to the value.
9987  *
9988  * @param[in] dev
9989  *   The device to configure through.
9990  * @param[in, out] matcher
9991  *   Flow matcher.
9992  * @param[in, out] key
9993  *   Flow matcher value.
9994  * @param[in] item
9995  *   Flow pattern to translate.
9996  * @param[in] last_item
9997  *   Last item flags.
9998  */
9999 static void
10000 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
10001                              void *key, const struct rte_flow_item *item,
10002                              uint64_t last_item)
10003 {
10004         struct mlx5_priv *priv = dev->data->dev_private;
10005         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
10006         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
10007         struct rte_ecpri_common_hdr common;
10008         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
10009                                      misc_parameters_4);
10010         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
10011         uint32_t *samples;
10012         void *dw_m;
10013         void *dw_v;
10014
10015         /*
10016          * In case of eCPRI over Ethernet, if EtherType is not specified,
10017          * match on eCPRI EtherType implicitly.
10018          */
10019         if (last_item & MLX5_FLOW_LAYER_OUTER_L2) {
10020                 void *hdrs_m, *hdrs_v, *l2m, *l2v;
10021
10022                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
10023                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
10024                 l2m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, ethertype);
10025                 l2v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
10026                 if (*(uint16_t *)l2m == 0 && *(uint16_t *)l2v == 0) {
10027                         *(uint16_t *)l2m = UINT16_MAX;
10028                         *(uint16_t *)l2v = RTE_BE16(RTE_ETHER_TYPE_ECPRI);
10029                 }
10030         }
10031         if (!ecpri_v)
10032                 return;
10033         if (!ecpri_m)
10034                 ecpri_m = &rte_flow_item_ecpri_mask;
10035         /*
10036          * At most four DW samples are supported in a single match for now.
10037          * Two are used for eCPRI matching:
10038          * 1. Type: one byte, mask should be 0x00ff0000 in network order.
10039          * 2. ID of a message: one or two bytes, mask 0xffff0000 or
10040          *    0xff000000 if any.
10041          */
10042         if (!ecpri_m->hdr.common.u32)
10043                 return;
10044         samples = priv->sh->ecpri_parser.ids;
10045         /* Need to take the whole DW as the mask to fill the entry. */
10046         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10047                             prog_sample_field_value_0);
10048         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10049                             prog_sample_field_value_0);
10050         /* Already big endian (network order) in the header. */
10051         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
10052         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
10053         /* Sample#0, used for matching type, offset 0. */
10054         MLX5_SET(fte_match_set_misc4, misc4_m,
10055                  prog_sample_field_id_0, samples[0]);
10056         /* It makes no sense to set the sample ID in the mask field. */
10057         MLX5_SET(fte_match_set_misc4, misc4_v,
10058                  prog_sample_field_id_0, samples[0]);
10059         /*
10060          * Check if the message body part needs to be matched.
10061          * Wildcard rules matching only the type field should be supported.
10062          */
10063         if (ecpri_m->hdr.dummy[0]) {
10064                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
10065                 switch (common.type) {
10066                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
10067                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
10068                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
10069                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10070                                             prog_sample_field_value_1);
10071                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10072                                             prog_sample_field_value_1);
10073                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
10074                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
10075                                             ecpri_m->hdr.dummy[0];
10076                         /* Sample#1, to match message body, offset 4. */
10077                         MLX5_SET(fte_match_set_misc4, misc4_m,
10078                                  prog_sample_field_id_1, samples[1]);
10079                         MLX5_SET(fte_match_set_misc4, misc4_v,
10080                                  prog_sample_field_id_1, samples[1]);
10081                         break;
10082                 default:
10083                         /* Others, do not match any sample ID. */
10084                         break;
10085                 }
10086         }
10087 }
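
/*
 * Illustrative sketch (not part of the driver): matching eCPRI IQ-data
 * messages with PC_ID 0x1234 (values are hypothetical). The common
 * header dword must end up in network order, which is why the type mask
 * is 0x00ff0000 as described above; converting the bitfield view with
 * rte_cpu_to_be_32() is one way an application can build it.
 *
 *   struct rte_flow_item_ecpri ecpri_spec = { 0 };
 *   struct rte_flow_item_ecpri ecpri_mask = { 0 };
 *
 *   ecpri_spec.hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA;
 *   ecpri_spec.hdr.common.u32 = rte_cpu_to_be_32(ecpri_spec.hdr.common.u32);
 *   ecpri_spec.hdr.type0.pc_id = RTE_BE16(0x1234);
 *   ecpri_mask.hdr.common.u32 = RTE_BE32(0x00ff0000);
 *   ecpri_mask.hdr.type0.pc_id = RTE_BE16(0xffff);
 */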
10088
10089 /**
10090  * Add connection tracking status item to matcher
10091  *
10092  * @param[in] dev
10093  *   The device to configure through.
10094  * @param[in, out] matcher
10095  *   Flow matcher.
10096  * @param[in, out] key
10097  *   Flow matcher value.
10098  * @param[in] item
10099  *   Flow pattern to translate.
10100  */
10101 static void
10102 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
10103                               void *matcher, void *key,
10104                               const struct rte_flow_item *item)
10105 {
10106         uint32_t reg_value = 0;
10107         int reg_id;
10108         /* 8 LSB: 0b11000011, the middle 4 bits are reserved. */
10109         uint32_t reg_mask = 0;
10110         const struct rte_flow_item_conntrack *spec = item->spec;
10111         const struct rte_flow_item_conntrack *mask = item->mask;
10112         uint32_t flags;
10113         struct rte_flow_error error;
10114
10115         if (!mask)
10116                 mask = &rte_flow_item_conntrack_mask;
10117         if (!spec || !mask->flags)
10118                 return;
10119         flags = spec->flags & mask->flags;
10120         /* Conflicting flags should have been rejected in validation. */
10121         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
10122                 reg_value |= MLX5_CT_SYNDROME_VALID;
10123         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10124                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
10125         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
10126                 reg_value |= MLX5_CT_SYNDROME_INVALID;
10127         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
10128                 reg_value |= MLX5_CT_SYNDROME_TRAP;
10129         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10130                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
10131         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
10132                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
10133                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
10134                 reg_mask |= 0xc0;
10135         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10136                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
10137         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10138                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
10139         /* The REG_C_x value could be saved during startup. */
10140         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
10141         if (reg_id == REG_NON)
10142                 return;
10143         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
10144                                reg_value, reg_mask);
10145 }
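
/*
 * Hypothetical usage of the mapping above: a rule that passes only
 * valid, tracked connections would set
 *
 *   spec->flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID;
 *   mask->flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID;
 *
 * which translates into reg_value = MLX5_CT_SYNDROME_VALID matched
 * under reg_mask = 0xc0 in the CT register.
 */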
10146
10147 static void
10148 flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
10149                             const struct rte_flow_item *item,
10150                             struct mlx5_flow *dev_flow, bool is_inner)
10151 {
10152         const struct rte_flow_item_flex *spec =
10153                 (const struct rte_flow_item_flex *)item->spec;
10154         int index = mlx5_flex_acquire_index(dev, spec->handle, false);
10155
10156         MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
10157         if (index < 0)
10158                 return;
10159         if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
10160                 /* Don't count both inner and outer flex items in one rule. */
10161                 if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
10162                         MLX5_ASSERT(false);
10163                 dev_flow->handle->flex_item |= RTE_BIT32(index);
10164         }
10165         mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
10166 }
10167
10168 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
10169
10170 #define HEADER_IS_ZERO(match_criteria, headers)                              \
10171         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
10172                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
10173
10174 /**
10175  * Calculate flow matcher enable bitmap.
10176  *
10177  * @param match_criteria
10178  *   Pointer to flow matcher criteria.
10179  *
10180  * @return
10181  *   Bitmap of enabled fields.
10182  */
10183 static uint8_t
10184 flow_dv_matcher_enable(uint32_t *match_criteria)
10185 {
10186         uint8_t match_criteria_enable;
10187
10188         match_criteria_enable =
10189                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
10190                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
10191         match_criteria_enable |=
10192                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
10193                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
10194         match_criteria_enable |=
10195                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
10196                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
10197         match_criteria_enable |=
10198                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
10199                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
10200         match_criteria_enable |=
10201                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
10202                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
10203         match_criteria_enable |=
10204                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
10205                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
10206         match_criteria_enable |=
10207                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10208                 MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10209         return match_criteria_enable;
10210 }
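
/*
 * Worked example: a matcher whose mask touches only the outer L2-L4
 * headers and the metadata registers (misc_parameters_2) yields
 *
 *   match_criteria_enable =
 *           (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *           (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
 *
 * so the firmware restricts matching to those two parameter sets.
 */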
10211
10212 static void
10213 __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10214 {
10215         /*
10216          * Check flow matching criteria first, subtract misc5/4 length if flow
10217          * doesn't own misc5/4 parameters. In some old rdma-core releases,
10218          * misc5/4 are not supported, and matcher creation failure is expected
10219          * w/o subtraction. If misc5 is provided, misc4 must be counted in since
10220          * misc5 is right after misc4.
10221          */
10222         if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10223                 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10224                         MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10225                 if (!(match_criteria & (1 <<
10226                         MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10227                         *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10228                 }
10229         }
10230 }
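
/*
 * Size arithmetic sketch: when neither misc4 nor misc5 is enabled the
 * buffer loses both trailing sets; when misc5 is enabled nothing is
 * cut, because misc4 sits right before misc5 in fte_match_param:
 *
 *   size_t size = MLX5_ST_SZ_BYTES(fte_match_param);
 *
 *   __flow_dv_adjust_buf_size(&size, 0);
 *   now: size == MLX5_ST_SZ_BYTES(fte_match_param) -
 *                MLX5_ST_SZ_BYTES(fte_match_set_misc5) -
 *                MLX5_ST_SZ_BYTES(fte_match_set_misc4)
 */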
10231
10232 static struct mlx5_list_entry *
10233 flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
10234                          struct mlx5_list_entry *entry, void *cb_ctx)
10235 {
10236         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10237         struct mlx5_flow_dv_matcher *ref = ctx->data;
10238         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10239                                                             typeof(*tbl), tbl);
10240         struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10241                                                             sizeof(*resource),
10242                                                             0, SOCKET_ID_ANY);
10243
10244         if (!resource) {
10245                 rte_flow_error_set(ctx->error, ENOMEM,
10246                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10247                                    "cannot create matcher");
10248                 return NULL;
10249         }
10250         memcpy(resource, entry, sizeof(*resource));
10251         resource->tbl = &tbl->tbl;
10252         return &resource->entry;
10253 }
10254
10255 static void
10256 flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
10257                              struct mlx5_list_entry *entry)
10258 {
10259         mlx5_free(entry);
10260 }
10261
10262 struct mlx5_list_entry *
10263 flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
10264 {
10265         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10266         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10267         struct rte_eth_dev *dev = ctx->dev;
10268         struct mlx5_flow_tbl_data_entry *tbl_data;
10269         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
10270         struct rte_flow_error *error = ctx->error;
10271         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10272         struct mlx5_flow_tbl_resource *tbl;
10273         void *domain;
10274         uint32_t idx = 0;
10275         int ret;
10276
10277         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10278         if (!tbl_data) {
10279                 rte_flow_error_set(error, ENOMEM,
10280                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10281                                    NULL,
10282                                    "cannot allocate flow table data entry");
10283                 return NULL;
10284         }
10285         tbl_data->idx = idx;
10286         tbl_data->tunnel = tt_prm->tunnel;
10287         tbl_data->group_id = tt_prm->group_id;
10288         tbl_data->external = !!tt_prm->external;
10289         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
10290         tbl_data->is_egress = !!key.is_egress;
10291         tbl_data->is_transfer = !!key.is_fdb;
10292         tbl_data->dummy = !!key.dummy;
10293         tbl_data->level = key.level;
10294         tbl_data->id = key.id;
10295         tbl = &tbl_data->tbl;
10296         if (key.dummy)
10297                 return &tbl_data->entry;
10298         if (key.is_fdb)
10299                 domain = sh->fdb_domain;
10300         else if (key.is_egress)
10301                 domain = sh->tx_domain;
10302         else
10303                 domain = sh->rx_domain;
10304         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
10305         if (ret) {
10306                 rte_flow_error_set(error, ENOMEM,
10307                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10308                                    NULL, "cannot create flow table object");
10309                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10310                 return NULL;
10311         }
10312         if (key.level != 0) {
10313                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
10314                                         (tbl->obj, &tbl_data->jump.action);
10315                 if (ret) {
10316                         rte_flow_error_set(error, ENOMEM,
10317                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10318                                            NULL,
10319                                            "cannot create flow jump action");
10320                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10321                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10322                         return NULL;
10323                 }
10324         }
10325         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
10326               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
10327               key.level, key.id);
10328         tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
10329                                               flow_dv_matcher_create_cb,
10330                                               flow_dv_matcher_match_cb,
10331                                               flow_dv_matcher_remove_cb,
10332                                               flow_dv_matcher_clone_cb,
10333                                               flow_dv_matcher_clone_free_cb);
10334         if (!tbl_data->matchers) {
10335                 rte_flow_error_set(error, ENOMEM,
10336                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10337                                    NULL,
10338                                    "cannot create tbl matcher list");
10339                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10340                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10341                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10342                 return NULL;
10343         }
10344         return &tbl_data->entry;
10345 }
10346
10347 int
10348 flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10349                      void *cb_ctx)
10350 {
10351         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10352         struct mlx5_flow_tbl_data_entry *tbl_data =
10353                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10354         union mlx5_flow_tbl_key key = { .v64 =  *(uint64_t *)(ctx->data) };
10355
10356         return tbl_data->level != key.level ||
10357                tbl_data->id != key.id ||
10358                tbl_data->dummy != key.dummy ||
10359                tbl_data->is_transfer != !!key.is_fdb ||
10360                tbl_data->is_egress != !!key.is_egress;
10361 }
10362
10363 struct mlx5_list_entry *
10364 flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10365                       void *cb_ctx)
10366 {
10367         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10368         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10369         struct mlx5_flow_tbl_data_entry *tbl_data;
10370         struct rte_flow_error *error = ctx->error;
10371         uint32_t idx = 0;
10372
10373         tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10374         if (!tbl_data) {
10375                 rte_flow_error_set(error, ENOMEM,
10376                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10377                                    NULL,
10378                                    "cannot allocate flow table data entry");
10379                 return NULL;
10380         }
10381         memcpy(tbl_data, oentry, sizeof(*tbl_data));
10382         tbl_data->idx = idx;
10383         return &tbl_data->entry;
10384 }
10385
10386 void
10387 flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10388 {
10389         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10390         struct mlx5_flow_tbl_data_entry *tbl_data =
10391                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10392
10393         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10394 }
10395
10396 /**
10397  * Get a flow table.
10398  *
10399  * @param[in, out] dev
10400  *   Pointer to rte_eth_dev structure.
10401  * @param[in] table_level
10402  *   Table level to use.
10403  * @param[in] egress
10404  *   Direction of the table.
10405  * @param[in] transfer
10406  *   E-Switch or NIC flow.
10407  * @param[in] dummy
10408  *   Dummy entry for dv API.
10409  * @param[in] table_id
10410  *   Table id to use.
10411  * @param[out] error
10412  *   Pointer to error structure.
10413  *
10414  * @return
10415  *   Returns the table resource on success, NULL in case of failure.
10416  */
10417 struct mlx5_flow_tbl_resource *
10418 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10419                          uint32_t table_level, uint8_t egress,
10420                          uint8_t transfer,
10421                          bool external,
10422                          const struct mlx5_flow_tunnel *tunnel,
10423                          uint32_t group_id, uint8_t dummy,
10424                          uint32_t table_id,
10425                          struct rte_flow_error *error)
10426 {
10427         struct mlx5_priv *priv = dev->data->dev_private;
10428         union mlx5_flow_tbl_key table_key = {
10429                 {
10430                         .level = table_level,
10431                         .id = table_id,
10432                         .reserved = 0,
10433                         .dummy = !!dummy,
10434                         .is_fdb = !!transfer,
10435                         .is_egress = !!egress,
10436                 }
10437         };
10438         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10439                 .tunnel = tunnel,
10440                 .group_id = group_id,
10441                 .external = external,
10442         };
10443         struct mlx5_flow_cb_ctx ctx = {
10444                 .dev = dev,
10445                 .error = error,
10446                 .data = &table_key.v64,
10447                 .data2 = &tt_prm,
10448         };
10449         struct mlx5_list_entry *entry;
10450         struct mlx5_flow_tbl_data_entry *tbl_data;
10451
10452         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10453         if (!entry) {
10454                 rte_flow_error_set(error, ENOMEM,
10455                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10456                                    "cannot get table");
10457                 return NULL;
10458         }
10459         DRV_LOG(DEBUG, "table_level %u table_id %u "
10460                 "tunnel %u group %u registered.",
10461                 table_level, table_id,
10462                 tunnel ? tunnel->tunnel_id : 0, group_id);
10463         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10464         return &tbl_data->tbl;
10465 }
10466
10467 void
10468 flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10469 {
10470         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10471         struct mlx5_flow_tbl_data_entry *tbl_data =
10472                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10473
10474         MLX5_ASSERT(entry && sh);
10475         if (tbl_data->jump.action)
10476                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10477         if (tbl_data->tbl.obj)
10478                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10479         if (tbl_data->tunnel_offload && tbl_data->external) {
10480                 struct mlx5_list_entry *he;
10481                 struct mlx5_hlist *tunnel_grp_hash;
10482                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10483                 union tunnel_tbl_key tunnel_key = {
10484                         .tunnel_id = tbl_data->tunnel ?
10485                                         tbl_data->tunnel->tunnel_id : 0,
10486                         .group = tbl_data->group_id
10487                 };
10488                 uint32_t table_level = tbl_data->level;
10489                 struct mlx5_flow_cb_ctx ctx = {
10490                         .data = (void *)&tunnel_key.val,
10491                 };
10492
10493                 tunnel_grp_hash = tbl_data->tunnel ?
10494                                         tbl_data->tunnel->groups :
10495                                         thub->groups;
10496                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
10497                 if (he)
10498                         mlx5_hlist_unregister(tunnel_grp_hash, he);
10499                 DRV_LOG(DEBUG,
10500                         "table_level %u id %u tunnel %u group %u released.",
10501                         table_level,
10502                         tbl_data->id,
10503                         tbl_data->tunnel ?
10504                         tbl_data->tunnel->tunnel_id : 0,
10505                         tbl_data->group_id);
10506         }
10507         mlx5_list_destroy(tbl_data->matchers);
10508         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10509 }
10510
10511 /**
10512  * Release a flow table.
10513  *
10514  * @param[in] sh
10515  *   Pointer to device shared structure.
10516  * @param[in] tbl
10517  *   Table resource to be released.
10518  *
10519  * @return
10520  *   Returns 0 if the table was released, 1 otherwise.
10521  */
10522 static int
10523 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10524                              struct mlx5_flow_tbl_resource *tbl)
10525 {
10526         struct mlx5_flow_tbl_data_entry *tbl_data =
10527                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10528
10529         if (!tbl)
10530                 return 0;
10531         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10532 }
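
/*
 * Usage sketch (the arguments are hypothetical): each successful
 * flow_dv_tbl_resource_get() takes a reference on the hashed table
 * entry, so callers are expected to pair it with this release:
 *
 *   struct mlx5_flow_tbl_resource *tbl =
 *           flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL,
 *                                    0, 0, 0, &error);
 *   if (tbl)
 *           flow_dv_tbl_resource_release(priv->sh, tbl);
 */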
10533
10534 int
10535 flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
10536                          struct mlx5_list_entry *entry, void *cb_ctx)
10537 {
10538         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10539         struct mlx5_flow_dv_matcher *ref = ctx->data;
10540         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10541                                                         entry);
10542
10543         return cur->crc != ref->crc ||
10544                cur->priority != ref->priority ||
10545                memcmp((const void *)cur->mask.buf,
10546                       (const void *)ref->mask.buf, ref->mask.size);
10547 }
10548
10549 struct mlx5_list_entry *
10550 flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
10551 {
10552         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10553         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10554         struct mlx5_flow_dv_matcher *ref = ctx->data;
10555         struct mlx5_flow_dv_matcher *resource;
10556         struct mlx5dv_flow_matcher_attr dv_attr = {
10557                 .type = IBV_FLOW_ATTR_NORMAL,
10558                 .match_mask = (void *)&ref->mask,
10559         };
10560         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10561                                                             typeof(*tbl), tbl);
10562         int ret;
10563
10564         resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
10565                                SOCKET_ID_ANY);
10566         if (!resource) {
10567                 rte_flow_error_set(ctx->error, ENOMEM,
10568                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10569                                    "cannot create matcher");
10570                 return NULL;
10571         }
10572         *resource = *ref;
10573         dv_attr.match_criteria_enable =
10574                 flow_dv_matcher_enable(resource->mask.buf);
10575         __flow_dv_adjust_buf_size(&ref->mask.size,
10576                                   dv_attr.match_criteria_enable);
10577         dv_attr.priority = ref->priority;
10578         if (tbl->is_egress)
10579                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10580         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
10581                                                tbl->tbl.obj,
10582                                                &resource->matcher_object);
10583         if (ret) {
10584                 mlx5_free(resource);
10585                 rte_flow_error_set(ctx->error, ENOMEM,
10586                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10587                                    "cannot create matcher");
10588                 return NULL;
10589         }
10590         return &resource->entry;
10591 }
10592
10593 /**
10594  * Register the flow matcher.
10595  *
10596  * @param[in, out] dev
10597  *   Pointer to rte_eth_dev structure.
10598  * @param[in, out] ref
10599  *   Pointer to the flow matcher reference.
10600  * @param[in, out] key
10601  *   Pointer to the flow table key.
10602  * @param[in, out] dev_flow
10603  *   Pointer to the dev_flow.
10604  * @param[out] error
10605  *   Pointer to the error structure.
10606  *
10607  * @return
10608  *   0 on success, a negative errno value otherwise and rte_errno is set.
10609  */
10610 static int
10611 flow_dv_matcher_register(struct rte_eth_dev *dev,
10612                          struct mlx5_flow_dv_matcher *ref,
10613                          union mlx5_flow_tbl_key *key,
10614                          struct mlx5_flow *dev_flow,
10615                          const struct mlx5_flow_tunnel *tunnel,
10616                          uint32_t group_id,
10617                          struct rte_flow_error *error)
10618 {
10619         struct mlx5_list_entry *entry;
10620         struct mlx5_flow_dv_matcher *resource;
10621         struct mlx5_flow_tbl_resource *tbl;
10622         struct mlx5_flow_tbl_data_entry *tbl_data;
10623         struct mlx5_flow_cb_ctx ctx = {
10624                 .error = error,
10625                 .data = ref,
10626         };
10627         /*
10628          * The tunnel offload API requires this registration for cases
10629          * when a tunnel match rule is inserted before the tunnel set rule.
10630          */
10631         tbl = flow_dv_tbl_resource_get(dev, key->level,
10632                                        key->is_egress, key->is_fdb,
10633                                        dev_flow->external, tunnel,
10634                                        group_id, 0, key->id, error);
10635         if (!tbl)
10636                 return -rte_errno;      /* No need to refill the error info */
10637         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10638         ref->tbl = tbl;
10639         entry = mlx5_list_register(tbl_data->matchers, &ctx);
10640         if (!entry) {
10641                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10642                 return rte_flow_error_set(error, ENOMEM,
10643                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10644                                           "cannot allocate ref memory");
10645         }
10646         resource = container_of(entry, typeof(*resource), entry);
10647         dev_flow->handle->dvh.matcher = resource;
10648         return 0;
10649 }
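
/*
 * Illustrative sketch (hypothetical values): a translation-stage caller
 * fills the table key and lets the helper resolve both the table and
 * the matcher:
 *
 *     union mlx5_flow_tbl_key tbl_key = {
 *             .level = table_level,
 *             .is_egress = attr->egress,
 *             .is_fdb = attr->transfer,
 *             .id = 0,
 *     };
 *
 *     if (flow_dv_matcher_register(dev, &matcher_ref, &tbl_key,
 *                                  dev_flow, tunnel, group_id, error))
 *             return -rte_errno;
 */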
10650
10651 struct mlx5_list_entry *
10652 flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
10653 {
10654         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10655         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10656         struct mlx5_flow_dv_tag_resource *entry;
10657         uint32_t idx = 0;
10658         int ret;
10659
10660         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10661         if (!entry) {
10662                 rte_flow_error_set(ctx->error, ENOMEM,
10663                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10664                                    "cannot allocate resource memory");
10665                 return NULL;
10666         }
10667         entry->idx = idx;
10668         entry->tag_id = *(uint32_t *)(ctx->data);
10669         ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
10670                                                   &entry->action);
10671         if (ret) {
10672                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10673                 rte_flow_error_set(ctx->error, ENOMEM,
10674                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10675                                    NULL, "cannot create action");
10676                 return NULL;
10677         }
10678         return &entry->entry;
10679 }
10680
10681 int
10682 flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10683                      void *cb_ctx)
10684 {
10685         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10686         struct mlx5_flow_dv_tag_resource *tag =
10687                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10688
10689         return *(uint32_t *)(ctx->data) != tag->tag_id;
10690 }
10691
10692 struct mlx5_list_entry *
10693 flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10694                      void *cb_ctx)
10695 {
10696         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10697         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10698         struct mlx5_flow_dv_tag_resource *entry;
10699         uint32_t idx = 0;
10700
10701         entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10702         if (!entry) {
10703                 rte_flow_error_set(ctx->error, ENOMEM,
10704                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10705                                    "cannot allocate tag resource memory");
10706                 return NULL;
10707         }
10708         memcpy(entry, oentry, sizeof(*entry));
10709         entry->idx = idx;
10710         return &entry->entry;
10711 }
10712
10713 void
10714 flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10715 {
10716         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10717         struct mlx5_flow_dv_tag_resource *tag =
10718                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10719
10720         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10721 }
10722
10723 /**
10724  * Find existing tag resource or create and register a new one.
10725  *
10726  * @param[in, out] dev
10727  *   Pointer to rte_eth_dev structure.
10728  * @param[in] tag_be24
10729  *   Tag value in big endian byte order, right-shifted by 8 bits.
10730  * @param[in, out] dev_flow
10731  *   Pointer to the dev_flow.
10732  * @param[out] error
10733  *   Pointer to the error structure.
10734  *
10735  * @return
10736  *   0 on success, a negative errno value otherwise and rte_errno is set.
10737  */
10738 static int
10739 flow_dv_tag_resource_register
10740                         (struct rte_eth_dev *dev,
10741                          uint32_t tag_be24,
10742                          struct mlx5_flow *dev_flow,
10743                          struct rte_flow_error *error)
10744 {
10745         struct mlx5_priv *priv = dev->data->dev_private;
10746         struct mlx5_flow_dv_tag_resource *resource;
10747         struct mlx5_list_entry *entry;
10748         struct mlx5_flow_cb_ctx ctx = {
10749                 .error = error,
10750                 .data = &tag_be24,
10751         };
10752         struct mlx5_hlist *tag_table;
10753
10754         tag_table = flow_dv_hlist_prepare(priv->sh, &priv->sh->tag_table,
10755                                       "tags",
10756                                       MLX5_TAGS_HLIST_ARRAY_SIZE,
10757                                       false, false, priv->sh,
10758                                       flow_dv_tag_create_cb,
10759                                       flow_dv_tag_match_cb,
10760                                       flow_dv_tag_remove_cb,
10761                                       flow_dv_tag_clone_cb,
10762                                       flow_dv_tag_clone_free_cb);
10763         if (unlikely(!tag_table))
10764                 return -rte_errno;
10765         entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
10766         if (entry) {
10767                 resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
10768                                         entry);
10769                 dev_flow->handle->dvh.rix_tag = resource->idx;
10770                 dev_flow->dv.tag_resource = resource;
10771                 return 0;
10772         }
10773         return -rte_errno;
10774 }
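
/*
 * Illustrative sketch (hypothetical mark_id): the MARK translation in
 * flow_dv_translate_action_sample() below is the typical caller:
 *
 *     uint32_t tag_be = mlx5_flow_mark_set(mark_id);
 *
 *     if (flow_dv_tag_resource_register(dev, tag_be, dev_flow, error))
 *             return -rte_errno;
 *     dr_action = dev_flow->dv.tag_resource->action;
 */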
10775
10776 void
10777 flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10778 {
10779         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10780         struct mlx5_flow_dv_tag_resource *tag =
10781                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10782
10783         MLX5_ASSERT(tag && sh && tag->action);
10784         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10785         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10786         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10787 }
10788
10789 /**
10790  * Release the tag.
10791  *
10792  * @param dev
10793  *   Pointer to Ethernet device.
10794  * @param tag_idx
10795  *   Tag index.
10796  *
10797  * @return
10798  *   1 while a reference on it exists, 0 when freed.
10799  */
10800 static int
10801 flow_dv_tag_release(struct rte_eth_dev *dev,
10802                     uint32_t tag_idx)
10803 {
10804         struct mlx5_priv *priv = dev->data->dev_private;
10805         struct mlx5_flow_dv_tag_resource *tag;
10806
10807         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10808         if (!tag)
10809                 return 0;
10810         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10811                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10812         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10813 }
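
/*
 * Note: flow handles store the tag as an ipool index (rix_tag) rather
 * than a pointer, so the release path first resolves the index via
 * mlx5_ipool_get() and then drops the hash-list reference; the remove
 * callback above destroys the DR action on the last release.
 */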
10814
10815 /**
10816  * Translate action PORT_ID / REPRESENTED_PORT to vport.
10817  *
10818  * @param[in] dev
10819  *   Pointer to rte_eth_dev structure.
10820  * @param[in] action
10821  *   Pointer to action PORT_ID / REPRESENTED_PORT.
10822  * @param[out] dst_port_id
10823  *   The target port ID.
10824  * @param[out] error
10825  *   Pointer to the error structure.
10826  *
10827  * @return
10828  *   0 on success, a negative errno value otherwise and rte_errno is set.
10829  */
10830 static int
10831 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10832                                  const struct rte_flow_action *action,
10833                                  uint32_t *dst_port_id,
10834                                  struct rte_flow_error *error)
10835 {
10836         uint32_t port;
10837         struct mlx5_priv *priv;
10838
10839         switch (action->type) {
10840         case RTE_FLOW_ACTION_TYPE_PORT_ID: {
10841                 const struct rte_flow_action_port_id *conf;
10842
10843                 conf = (const struct rte_flow_action_port_id *)action->conf;
10844                 port = conf->original ? dev->data->port_id : conf->id;
10845                 break;
10846         }
10847         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
10848                 const struct rte_flow_action_ethdev *ethdev;
10849
10850                 ethdev = (const struct rte_flow_action_ethdev *)action->conf;
10851                 port = ethdev->port_id;
10852                 break;
10853         }
10854         default:
10855                 MLX5_ASSERT(false);
10856                 return rte_flow_error_set(error, EINVAL,
10857                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
10858                                           "unknown E-Switch action");
10859         }
10860
10861         priv = mlx5_port_to_eswitch_info(port, false);
10862         if (!priv)
10863                 return rte_flow_error_set(error, -rte_errno,
10864                                           RTE_FLOW_ERROR_TYPE_ACTION,
10865                                           NULL,
10866                                           "No eswitch info was found for port");
10867 #ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
10868         /*
10869          * This parameter is transferred to
10870          * mlx5dv_dr_action_create_dest_ib_port().
10871          */
10872         *dst_port_id = priv->dev_port;
10873 #else
10874         /*
10875          * Legacy mode, no LAG configuration is supported.
10876          * This parameter is transferred to
10877          * mlx5dv_dr_action_create_dest_vport().
10878          */
10879         *dst_port_id = priv->vport_id;
10880 #endif
10881         return 0;
10882 }
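
/*
 * Illustrative sketch (hypothetical peer_port_id): resolving the
 * destination vport for a PORT_ID action:
 *
 *     const struct rte_flow_action_port_id conf = { .id = peer_port_id };
 *     const struct rte_flow_action act = {
 *             .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *             .conf = &conf,
 *     };
 *     uint32_t dst_port;
 *
 *     if (flow_dv_translate_action_port_id(dev, &act, &dst_port, error))
 *             return -rte_errno;
 */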
10883
10884 /**
10885  * Create a counter with aging configuration.
10886  *
10887  * @param[in] dev
10888  *   Pointer to rte_eth_dev structure.
10889  * @param[in] dev_flow
10890  *   Pointer to the mlx5_flow.
10891  * @param[out] count
10892  *   Pointer to the counter action configuration.
10893  * @param[in] age
10894  *   Pointer to the aging action configuration.
10895  *
10896  * @return
10897  *   Index to flow counter on success, 0 otherwise.
10898  */
10899 static uint32_t
10900 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10901                                 struct mlx5_flow *dev_flow,
10902                                 const struct rte_flow_action_count *count
10903                                         __rte_unused,
10904                                 const struct rte_flow_action_age *age)
10905 {
10906         uint32_t counter;
10907         struct mlx5_age_param *age_param;
10908
10909         counter = flow_dv_counter_alloc(dev, !!age);
10910         if (!counter || age == NULL)
10911                 return counter;
10912         age_param = flow_dv_counter_idx_get_age(dev, counter);
10913         age_param->context = age->context ? age->context :
10914                 (void *)(uintptr_t)(dev_flow->flow_idx);
10915         age_param->timeout = age->timeout;
10916         age_param->port_id = dev->data->port_id;
10917         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10918         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10919         return counter;
10920 }
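
/*
 * Illustrative sketch (hypothetical values): a COUNT action combined
 * with AGE shares a single aged counter. The timeout is in seconds and
 * a NULL context falls back to the flow index:
 *
 *     const struct rte_flow_action_age age = {
 *             .timeout = 10,
 *             .context = NULL,
 *     };
 *
 *     flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
 *                                                      NULL, &age);
 *     if (!flow->counter)
 *             return rte_flow_error_set(error, rte_errno,
 *                                       RTE_FLOW_ERROR_TYPE_ACTION,
 *                                       NULL, "cannot create counter");
 */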
10921
10922 /**
10923  * Add Tx queue matcher.
10924  *
10925  * @param[in] dev
10926  *   Pointer to the dev struct.
10927  * @param[in, out] matcher
10928  *   Flow matcher.
10929  * @param[in, out] key
10930  *   Flow matcher value.
10931  * @param[in] item
10932  *   Flow pattern to translate.
10935  */
10936 static void
10937 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10938                                 void *matcher, void *key,
10939                                 const struct rte_flow_item *item)
10940 {
10941         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10942         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10943         void *misc_m =
10944                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10945         void *misc_v =
10946                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10947         struct mlx5_txq_ctrl *txq;
10948         uint32_t queue, mask;
10949
10950         queue_m = (const void *)item->mask;
10951         queue_v = (const void *)item->spec;
10952         if (!queue_v)
10953                 return;
10954         txq = mlx5_txq_get(dev, queue_v->queue);
10955         if (!txq)
10956                 return;
10957         if (txq->type == MLX5_TXQ_TYPE_HAIRPIN)
10958                 queue = txq->obj->sq->id;
10959         else
10960                 queue = txq->obj->sq_obj.sq->id;
10961         mask = queue_m == NULL ? UINT32_MAX : queue_m->queue;
10962         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, mask);
10963         MLX5_SET(fte_match_set_misc, misc_v, source_sqn, queue & mask);
10964         mlx5_txq_release(dev, queue_v->queue);
10965 }
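
/*
 * Note: the Tx queue item is matched through the misc parameter
 * source_sqn, so the queue index from the pattern is first resolved to
 * the underlying hardware SQ number (hairpin SQ or regular SQ object)
 * before being written into the matcher mask and value buffers.
 */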
10966
10967 /**
10968  * Set the hash fields according to the @p flow information.
10969  *
10970  * @param[in] dev_flow
10971  *   Pointer to the mlx5_flow.
10972  * @param[in] rss_desc
10973  *   Pointer to the mlx5_flow_rss_desc.
10974  */
10975 static void
10976 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10977                        struct mlx5_flow_rss_desc *rss_desc)
10978 {
10979         uint64_t items = dev_flow->handle->layers;
10980         int rss_inner = 0;
10981         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10982
10983         dev_flow->hash_fields = 0;
10984 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10985         if (rss_desc->level >= 2)
10986                 rss_inner = 1;
10987 #endif
10988         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10989             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10990                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10991                         if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
10992                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10993                         else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
10994                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10995                         else
10996                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10997                 }
10998         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10999                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
11000                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
11001                         if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
11002                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
11003                         else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
11004                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
11005                         else
11006                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
11007                 }
11008         }
11009         if (dev_flow->hash_fields == 0)
11010                 /*
11011                  * There is no match between the RSS types and the
11012                  * L3 protocol (IPv4/IPv6) defined in the flow rule.
11013                  */
11014                 return;
11015         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
11016             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
11017                 if (rss_types & RTE_ETH_RSS_UDP) {
11018                         if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
11019                                 dev_flow->hash_fields |=
11020                                                 IBV_RX_HASH_SRC_PORT_UDP;
11021                         else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
11022                                 dev_flow->hash_fields |=
11023                                                 IBV_RX_HASH_DST_PORT_UDP;
11024                         else
11025                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
11026                 }
11027         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
11028                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
11029                 if (rss_types & RTE_ETH_RSS_TCP) {
11030                         if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
11031                                 dev_flow->hash_fields |=
11032                                                 IBV_RX_HASH_SRC_PORT_TCP;
11033                         else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
11034                                 dev_flow->hash_fields |=
11035                                                 IBV_RX_HASH_DST_PORT_TCP;
11036                         else
11037                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
11038                 }
11039         }
11040         if (rss_inner)
11041                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
11042 }
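
/*
 * Worked example: for a rule whose layers include outer IPv4 + UDP with
 * rss_desc->types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_UDP |
 * RTE_ETH_RSS_L4_SRC_ONLY, the code above selects MLX5_IPV4_IBV_RX_HASH
 * (both IPv4 addresses) plus IBV_RX_HASH_SRC_PORT_UDP (source port
 * only); IBV_RX_HASH_INNER is ORed in only when rss_desc->level >= 2
 * selects the inner headers.
 */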
11043
11044 /**
11045  * Prepare an Rx Hash queue.
11046  *
11047  * @param dev
11048  *   Pointer to Ethernet device.
11049  * @param[in] dev_flow
11050  *   Pointer to the mlx5_flow.
11051  * @param[in] rss_desc
11052  *   Pointer to the mlx5_flow_rss_desc.
11053  * @param[out] hrxq_idx
11054  *   Hash Rx queue index.
11055  *
11056  * @return
11057  *   The initialized Verbs/DevX object, NULL otherwise (rte_errno is set).
11058  */
11059 static struct mlx5_hrxq *
11060 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
11061                      struct mlx5_flow *dev_flow,
11062                      struct mlx5_flow_rss_desc *rss_desc,
11063                      uint32_t *hrxq_idx)
11064 {
11065         struct mlx5_priv *priv = dev->data->dev_private;
11066         struct mlx5_flow_handle *dh = dev_flow->handle;
11067         struct mlx5_hrxq *hrxq;
11068
11069         MLX5_ASSERT(rss_desc->queue_num);
11070         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
11071         rss_desc->hash_fields = dev_flow->hash_fields;
11072         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
11073         rss_desc->shared_rss = 0;
11074         if (rss_desc->hash_fields == 0)
11075                 rss_desc->queue_num = 1;
11076         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
11077         if (!*hrxq_idx)
11078                 return NULL;
11079         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
11080                               *hrxq_idx);
11081         return hrxq;
11082 }
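
/*
 * Illustrative sketch: the QUEUE sub-action translation below follows
 * this pattern, pinning the RSS descriptor to a single queue:
 *
 *     rss_desc->queue_num = 1;
 *     rss_desc->queue[0] = queue_index;
 *     hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, &hrxq_idx);
 *     if (!hrxq)
 *             return rte_flow_error_set(error, rte_errno,
 *                                       RTE_FLOW_ERROR_TYPE_ACTION,
 *                                       NULL,
 *                                       "cannot create fate queue");
 */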
11083
11084 /**
11085  * Release sample sub action resource.
11086  *
11087  * @param[in, out] dev
11088  *   Pointer to rte_eth_dev structure.
11089  * @param[in] act_res
11090  *   Pointer to sample sub action resource.
11091  */
11092 static void
11093 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
11094                                    struct mlx5_flow_sub_actions_idx *act_res)
11095 {
11096         if (act_res->rix_hrxq) {
11097                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
11098                 act_res->rix_hrxq = 0;
11099         }
11100         if (act_res->rix_encap_decap) {
11101                 flow_dv_encap_decap_resource_release(dev,
11102                                                      act_res->rix_encap_decap);
11103                 act_res->rix_encap_decap = 0;
11104         }
11105         if (act_res->rix_port_id_action) {
11106                 flow_dv_port_id_action_resource_release(dev,
11107                                                 act_res->rix_port_id_action);
11108                 act_res->rix_port_id_action = 0;
11109         }
11110         if (act_res->rix_tag) {
11111                 flow_dv_tag_release(dev, act_res->rix_tag);
11112                 act_res->rix_tag = 0;
11113         }
11114         if (act_res->rix_jump) {
11115                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
11116                 act_res->rix_jump = 0;
11117         }
11118 }
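
/*
 * Note: each branch above mirrors a reference taken while translating
 * the sample sub-actions (hash Rx queue, encap/decap, port ID, tag and
 * jump); every index is cleared after its resource reference is
 * dropped, so a second call is a no-op.
 */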
11119
11120 int
11121 flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
11122                         struct mlx5_list_entry *entry, void *cb_ctx)
11123 {
11124         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11125         struct rte_eth_dev *dev = ctx->dev;
11126         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11127         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
11128                                                               typeof(*resource),
11129                                                               entry);
11130
11131         if (ctx_resource->ratio == resource->ratio &&
11132             ctx_resource->ft_type == resource->ft_type &&
11133             ctx_resource->ft_id == resource->ft_id &&
11134             ctx_resource->set_action == resource->set_action &&
11135             !memcmp((void *)&ctx_resource->sample_act,
11136                     (void *)&resource->sample_act,
11137                     sizeof(struct mlx5_flow_sub_actions_list))) {
11138                 /*
11139                  * An existing sample action is reused; release the
11140                  * sub-action references prepared for the new one.
11141                  */
11142                 flow_dv_sample_sub_actions_release(dev,
11143                                                    &ctx_resource->sample_idx);
11144                 return 0;
11145         }
11146         return 1;
11147 }
11148
11149 struct mlx5_list_entry *
11150 flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11151 {
11152         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11153         struct rte_eth_dev *dev = ctx->dev;
11154         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11155         void **sample_dv_actions = ctx_resource->sub_actions;
11156         struct mlx5_flow_dv_sample_resource *resource;
11157         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
11158         struct mlx5_priv *priv = dev->data->dev_private;
11159         struct mlx5_dev_ctx_shared *sh = priv->sh;
11160         struct mlx5_flow_tbl_resource *tbl;
11161         uint32_t idx = 0;
11162         const uint32_t next_ft_step = 1;
11163         uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
11164         uint8_t is_egress = 0;
11165         uint8_t is_transfer = 0;
11166         struct rte_flow_error *error = ctx->error;
11167
11168         /* Register new sample resource. */
11169         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11170         if (!resource) {
11171                 rte_flow_error_set(error, ENOMEM,
11172                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11173                                           NULL,
11174                                           "cannot allocate resource memory");
11175                 return NULL;
11176         }
11177         *resource = *ctx_resource;
11178         /* Create normal path table level */
11179         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11180                 is_transfer = 1;
11181         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
11182                 is_egress = 1;
11183         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
11184                                         is_egress, is_transfer,
11185                                         true, NULL, 0, 0, 0, error);
11186         if (!tbl) {
11187                 rte_flow_error_set(error, ENOMEM,
11188                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11189                                           NULL,
11190                                           "failed to create normal path table "
11191                                           "for sample");
11192                 goto error;
11193         }
11194         resource->normal_path_tbl = tbl;
11195         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
11196                 if (!sh->default_miss_action) {
11197                         rte_flow_error_set(error, ENOMEM,
11198                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11199                                                 NULL,
11200                                                 "default miss action was not "
11201                                                 "created");
11202                         goto error;
11203                 }
11204                 sample_dv_actions[ctx_resource->sample_act.actions_num++] =
11205                                                 sh->default_miss_action;
11206         }
11207         /* Create a DR sample action */
11208         sampler_attr.sample_ratio = resource->ratio;
11209         sampler_attr.default_next_table = tbl->obj;
11210         sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
11211         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
11212                                                         &sample_dv_actions[0];
11213         sampler_attr.action = resource->set_action;
11214         if (mlx5_os_flow_dr_create_flow_action_sampler
11215                         (&sampler_attr, &resource->verbs_action)) {
11216                 rte_flow_error_set(error, ENOMEM,
11217                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11218                                         NULL, "cannot create sample action");
11219                 goto error;
11220         }
11221         resource->idx = idx;
11222         resource->dev = dev;
11223         return &resource->entry;
11224 error:
11225         if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
11226                 flow_dv_sample_sub_actions_release(dev,
11227                                                    &resource->sample_idx);
11228         if (resource->normal_path_tbl)
11229                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11230                                 resource->normal_path_tbl);
11231         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
11232         return NULL;
11234 }
11235
11236 struct mlx5_list_entry *
11237 flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
11238                          struct mlx5_list_entry *entry __rte_unused,
11239                          void *cb_ctx)
11240 {
11241         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11242         struct rte_eth_dev *dev = ctx->dev;
11243         struct mlx5_flow_dv_sample_resource *resource;
11244         struct mlx5_priv *priv = dev->data->dev_private;
11245         struct mlx5_dev_ctx_shared *sh = priv->sh;
11246         uint32_t idx = 0;
11247
11248         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11249         if (!resource) {
11250                 rte_flow_error_set(ctx->error, ENOMEM,
11251                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11252                                           NULL,
11253                                           "cannot allocate resource memory");
11254                 return NULL;
11255         }
11256         memcpy(resource, entry, sizeof(*resource));
11257         resource->idx = idx;
11258         resource->dev = dev;
11259         return &resource->entry;
11260 }
11261
11262 void
11263 flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
11264                              struct mlx5_list_entry *entry)
11265 {
11266         struct mlx5_flow_dv_sample_resource *resource =
11267                                   container_of(entry, typeof(*resource), entry);
11268         struct rte_eth_dev *dev = resource->dev;
11269         struct mlx5_priv *priv = dev->data->dev_private;
11270
11271         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
11272 }
11273
11274 /**
11275  * Find existing sample resource or create and register a new one.
11276  *
11277  * @param[in, out] dev
11278  *   Pointer to rte_eth_dev structure.
11279  * @param[in] ref
11280  *   Pointer to sample resource reference.
11281  * @param[in, out] dev_flow
11282  *   Pointer to the dev_flow.
11283  * @param[out] error
11284  *   Pointer to the error structure.
11285  *
11286  * @return
11287  *   0 on success, a negative errno value otherwise and rte_errno is set.
11288  */
11289 static int
11290 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
11291                          struct mlx5_flow_dv_sample_resource *ref,
11292                          struct mlx5_flow *dev_flow,
11293                          struct rte_flow_error *error)
11294 {
11295         struct mlx5_flow_dv_sample_resource *resource;
11296         struct mlx5_list_entry *entry;
11297         struct mlx5_priv *priv = dev->data->dev_private;
11298         struct mlx5_flow_cb_ctx ctx = {
11299                 .dev = dev,
11300                 .error = error,
11301                 .data = ref,
11302         };
11303
11304         entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
11305         if (!entry)
11306                 return -rte_errno;
11307         resource = container_of(entry, typeof(*resource), entry);
11308         dev_flow->handle->dvh.rix_sample = resource->idx;
11309         dev_flow->dv.sample_res = resource;
11310         return 0;
11311 }
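
/*
 * Note: registration funnels through the shared sample_action_list. On
 * a hit, flow_dv_sample_match_cb() releases the sub-action references
 * prepared for the duplicate; on a miss, flow_dv_sample_create_cb()
 * builds the DR sampler object. The caller only needs:
 *
 *     if (flow_dv_sample_resource_register(dev, &res, dev_flow, error))
 *             return -rte_errno;
 */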
11312
11313 int
11314 flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
11315                             struct mlx5_list_entry *entry, void *cb_ctx)
11316 {
11317         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11318         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11319         struct rte_eth_dev *dev = ctx->dev;
11320         struct mlx5_flow_dv_dest_array_resource *resource =
11321                                   container_of(entry, typeof(*resource), entry);
11322         uint32_t idx = 0;
11323
11324         if (ctx_resource->num_of_dest == resource->num_of_dest &&
11325             ctx_resource->ft_type == resource->ft_type &&
11326             !memcmp((void *)resource->sample_act,
11327                     (void *)ctx_resource->sample_act,
11328                    (ctx_resource->num_of_dest *
11329                    sizeof(struct mlx5_flow_sub_actions_list)))) {
11330                 /*
11331                  * An existing destination array action is reused; release
11332                  * the sub-action references prepared for the new one.
11333                  */
11334                 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11335                         flow_dv_sample_sub_actions_release(dev,
11336                                         &ctx_resource->sample_idx[idx]);
11337                 return 0;
11338         }
11339         return 1;
11340 }
11341
11342 struct mlx5_list_entry *
11343 flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11344 {
11345         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11346         struct rte_eth_dev *dev = ctx->dev;
11347         struct mlx5_flow_dv_dest_array_resource *resource;
11348         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11349         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
11350         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
11351         struct mlx5_priv *priv = dev->data->dev_private;
11352         struct mlx5_dev_ctx_shared *sh = priv->sh;
11353         struct mlx5_flow_sub_actions_list *sample_act;
11354         struct mlx5dv_dr_domain *domain;
11355         uint32_t idx = 0, res_idx = 0;
11356         struct rte_flow_error *error = ctx->error;
11357         uint64_t action_flags;
11358         int ret;
11359
11360         /* Register new destination array resource. */
11361         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11362                                             &res_idx);
11363         if (!resource) {
11364                 rte_flow_error_set(error, ENOMEM,
11365                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11366                                           NULL,
11367                                           "cannot allocate resource memory");
11368                 return NULL;
11369         }
11370         *resource = *ctx_resource;
11371         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11372                 domain = sh->fdb_domain;
11373         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
11374                 domain = sh->rx_domain;
11375         else
11376                 domain = sh->tx_domain;
11377         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11378                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
11379                                  mlx5_malloc(MLX5_MEM_ZERO,
11380                                  sizeof(struct mlx5dv_dr_action_dest_attr),
11381                                  0, SOCKET_ID_ANY);
11382                 if (!dest_attr[idx]) {
11383                         rte_flow_error_set(error, ENOMEM,
11384                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11385                                            NULL,
11386                                            "cannot allocate resource memory");
11387                         goto error;
11388                 }
11389                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
11390                 sample_act = &ctx_resource->sample_act[idx];
11391                 action_flags = sample_act->action_flags;
11392                 switch (action_flags) {
11393                 case MLX5_FLOW_ACTION_QUEUE:
11394                         dest_attr[idx]->dest = sample_act->dr_queue_action;
11395                         break;
11396                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
11397                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
11398                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
11399                         dest_attr[idx]->dest_reformat->reformat =
11400                                         sample_act->dr_encap_action;
11401                         dest_attr[idx]->dest_reformat->dest =
11402                                         sample_act->dr_port_id_action;
11403                         break;
11404                 case MLX5_FLOW_ACTION_PORT_ID:
11405                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
11406                         break;
11407                 case MLX5_FLOW_ACTION_JUMP:
11408                         dest_attr[idx]->dest = sample_act->dr_jump_action;
11409                         break;
11410                 default:
11411                         rte_flow_error_set(error, EINVAL,
11412                                            RTE_FLOW_ERROR_TYPE_ACTION,
11413                                            NULL,
11414                                            "unsupported actions type");
11415                         goto error;
11416                 }
11417         }
11418         /* Create a dest array action */
11419         ret = mlx5_os_flow_dr_create_flow_action_dest_array
11420                                                 (domain,
11421                                                  resource->num_of_dest,
11422                                                  dest_attr,
11423                                                  &resource->action);
11424         if (ret) {
11425                 rte_flow_error_set(error, ENOMEM,
11426                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11427                                    NULL,
11428                                    "cannot create destination array action");
11429                 goto error;
11430         }
11431         resource->idx = res_idx;
11432         resource->dev = dev;
11433         for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11434                 mlx5_free(dest_attr[idx]);
11435         return &resource->entry;
11436 error:
11437         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11438                 flow_dv_sample_sub_actions_release(dev,
11439                                                    &resource->sample_idx[idx]);
11440                 if (dest_attr[idx])
11441                         mlx5_free(dest_attr[idx]);
11442         }
11443         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
11444         return NULL;
11445 }
11446
11447 struct mlx5_list_entry *
11448 flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
11449                             struct mlx5_list_entry *entry __rte_unused,
11450                             void *cb_ctx)
11451 {
11452         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11453         struct rte_eth_dev *dev = ctx->dev;
11454         struct mlx5_flow_dv_dest_array_resource *resource;
11455         struct mlx5_priv *priv = dev->data->dev_private;
11456         struct mlx5_dev_ctx_shared *sh = priv->sh;
11457         uint32_t res_idx = 0;
11458         struct rte_flow_error *error = ctx->error;
11459
11460         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11461                                       &res_idx);
11462         if (!resource) {
11463                 rte_flow_error_set(error, ENOMEM,
11464                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11465                                           NULL,
11466                                           "cannot allocate dest-array memory");
11467                 return NULL;
11468         }
11469         memcpy(resource, entry, sizeof(*resource));
11470         resource->idx = res_idx;
11471         resource->dev = dev;
11472         return &resource->entry;
11473 }
11474
11475 void
11476 flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
11477                                  struct mlx5_list_entry *entry)
11478 {
11479         struct mlx5_flow_dv_dest_array_resource *resource =
11480                         container_of(entry, typeof(*resource), entry);
11481         struct rte_eth_dev *dev = resource->dev;
11482         struct mlx5_priv *priv = dev->data->dev_private;
11483
11484         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11485 }
11486
11487 /**
11488  * Find existing destination array resource or create and register a new one.
11489  *
11490  * @param[in, out] dev
11491  *   Pointer to rte_eth_dev structure.
11492  * @param[in] ref
11493  *   Pointer to destination array resource reference.
11494  * @param[in, out] dev_flow
11495  *   Pointer to the dev_flow.
11496  * @param[out] error
11497  *   Pointer to the error structure.
11498  *
11499  * @return
11500  *   0 on success, a negative errno value otherwise and rte_errno is set.
11501  */
11502 static int
11503 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11504                          struct mlx5_flow_dv_dest_array_resource *ref,
11505                          struct mlx5_flow *dev_flow,
11506                          struct rte_flow_error *error)
11507 {
11508         struct mlx5_flow_dv_dest_array_resource *resource;
11509         struct mlx5_priv *priv = dev->data->dev_private;
11510         struct mlx5_list_entry *entry;
11511         struct mlx5_flow_cb_ctx ctx = {
11512                 .dev = dev,
11513                 .error = error,
11514                 .data = ref,
11515         };
11516
11517         entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
11518         if (!entry)
11519                 return -rte_errno;
11520         resource = container_of(entry, typeof(*resource), entry);
11521         dev_flow->handle->dvh.rix_dest_array = resource->idx;
11522         dev_flow->dv.dest_array_res = resource;
11523         return 0;
11524 }
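
/*
 * Note: the destination array is the mirroring counterpart of the
 * sampler. It is registered the same way through dest_array_list, and
 * flow_dv_create_action_sample() below selects it whenever more than
 * one destination is present.
 */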
11525
11526 /**
11527  * Convert Sample action to DV specification.
11528  *
11529  * @param[in] dev
11530  *   Pointer to rte_eth_dev structure.
11531  * @param[in] action
11532  *   Pointer to sample action structure.
11533  * @param[in, out] dev_flow
11534  *   Pointer to the mlx5_flow.
11535  * @param[in] attr
11536  *   Pointer to the flow attributes.
11537  * @param[in, out] num_of_dest
11538  *   Pointer to the num of destination.
11539  * @param[in, out] sample_actions
11540  *   Pointer to sample actions list.
11541  * @param[in, out] res
11542  *   Pointer to sample resource.
11543  * @param[out] error
11544  *   Pointer to the error structure.
11545  *
11546  * @return
11547  *   0 on success, a negative errno value otherwise and rte_errno is set.
11548  */
11549 static int
11550 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
11551                                 const struct rte_flow_action_sample *action,
11552                                 struct mlx5_flow *dev_flow,
11553                                 const struct rte_flow_attr *attr,
11554                                 uint32_t *num_of_dest,
11555                                 void **sample_actions,
11556                                 struct mlx5_flow_dv_sample_resource *res,
11557                                 struct rte_flow_error *error)
11558 {
11559         struct mlx5_priv *priv = dev->data->dev_private;
11560         const struct rte_flow_action *sub_actions;
11561         struct mlx5_flow_sub_actions_list *sample_act;
11562         struct mlx5_flow_sub_actions_idx *sample_idx;
11563         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11564         struct rte_flow *flow = dev_flow->flow;
11565         struct mlx5_flow_rss_desc *rss_desc;
11566         uint64_t action_flags = 0;
11567
11568         MLX5_ASSERT(wks);
11569         rss_desc = &wks->rss_desc;
11570         sample_act = &res->sample_act;
11571         sample_idx = &res->sample_idx;
11572         res->ratio = action->ratio;
11573         sub_actions = action->actions;
11574         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
11575                 int type = sub_actions->type;
11576                 uint32_t pre_rix = 0;
11577                 void *pre_r;
11578                 switch (type) {
11579                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11580                 {
11581                         const struct rte_flow_action_queue *queue;
11582                         struct mlx5_hrxq *hrxq;
11583                         uint32_t hrxq_idx;
11584
11585                         queue = sub_actions->conf;
11586                         rss_desc->queue_num = 1;
11587                         rss_desc->queue[0] = queue->index;
11588                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11589                                                     rss_desc, &hrxq_idx);
11590                         if (!hrxq)
11591                                 return rte_flow_error_set
11592                                         (error, rte_errno,
11593                                          RTE_FLOW_ERROR_TYPE_ACTION,
11594                                          NULL,
11595                                          "cannot create fate queue");
11596                         sample_act->dr_queue_action = hrxq->action;
11597                         sample_idx->rix_hrxq = hrxq_idx;
11598                         sample_actions[sample_act->actions_num++] =
11599                                                 hrxq->action;
11600                         (*num_of_dest)++;
11601                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11602                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11603                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11604                         dev_flow->handle->fate_action =
11605                                         MLX5_FLOW_FATE_QUEUE;
11606                         break;
11607                 }
11608                 case RTE_FLOW_ACTION_TYPE_RSS:
11609                 {
11610                         struct mlx5_hrxq *hrxq;
11611                         uint32_t hrxq_idx;
11612                         const struct rte_flow_action_rss *rss;
11613                         const uint8_t *rss_key;
11614
11615                         rss = sub_actions->conf;
11616                         memcpy(rss_desc->queue, rss->queue,
11617                                rss->queue_num * sizeof(uint16_t));
11618                         rss_desc->queue_num = rss->queue_num;
11619                         /* NULL RSS key indicates default RSS key. */
11620                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11621                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11622                          * rss->level and rss->types should be set in advance
11623                          * rss->level and rss.types should be set in advance
11624                          * when expanding items for RSS.
11625                          */
11626                         flow_dv_hashfields_set(dev_flow, rss_desc);
11627                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11628                                                     rss_desc, &hrxq_idx);
11629                         if (!hrxq)
11630                                 return rte_flow_error_set
11631                                         (error, rte_errno,
11632                                          RTE_FLOW_ERROR_TYPE_ACTION,
11633                                          NULL,
11634                                          "cannot create fate queue");
11635                         sample_act->dr_queue_action = hrxq->action;
11636                         sample_idx->rix_hrxq = hrxq_idx;
11637                         sample_actions[sample_act->actions_num++] =
11638                                                 hrxq->action;
11639                         (*num_of_dest)++;
11640                         action_flags |= MLX5_FLOW_ACTION_RSS;
11641                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11642                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11643                         dev_flow->handle->fate_action =
11644                                         MLX5_FLOW_FATE_QUEUE;
11645                         break;
11646                 }
11647                 case RTE_FLOW_ACTION_TYPE_MARK:
11648                 {
11649                         uint32_t tag_be = mlx5_flow_mark_set
11650                                 (((const struct rte_flow_action_mark *)
11651                                 (sub_actions->conf))->id);
11652
11653                         wks->mark = 1;
11654                         pre_rix = dev_flow->handle->dvh.rix_tag;
11655                         /* Save the mark resource before sample */
11656                         pre_r = dev_flow->dv.tag_resource;
11657                         if (flow_dv_tag_resource_register(dev, tag_be,
11658                                                   dev_flow, error))
11659                                 return -rte_errno;
11660                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11661                         sample_act->dr_tag_action =
11662                                 dev_flow->dv.tag_resource->action;
11663                         sample_idx->rix_tag =
11664                                 dev_flow->handle->dvh.rix_tag;
11665                         sample_actions[sample_act->actions_num++] =
11666                                                 sample_act->dr_tag_action;
11667                         /* Recover the mark resource after sample */
11668                         dev_flow->dv.tag_resource = pre_r;
11669                         dev_flow->handle->dvh.rix_tag = pre_rix;
11670                         action_flags |= MLX5_FLOW_ACTION_MARK;
11671                         break;
11672                 }
11673                 case RTE_FLOW_ACTION_TYPE_COUNT:
11674                 {
11675                         if (!flow->counter) {
11676                                 flow->counter =
11677                                         flow_dv_translate_create_counter(dev,
11678                                                 dev_flow, sub_actions->conf,
11679                                                 0);
11680                                 if (!flow->counter)
11681                                         return rte_flow_error_set
11682                                                 (error, rte_errno,
11683                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11684                                                 NULL,
11685                                                 "cannot create counter"
11686                                                 " object.");
11687                         }
11688                         sample_act->dr_cnt_action =
11689                                   (flow_dv_counter_get_by_idx(dev,
11690                                   flow->counter, NULL))->action;
11691                         sample_actions[sample_act->actions_num++] =
11692                                                 sample_act->dr_cnt_action;
11693                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11694                         break;
11695                 }
11696                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11697                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
11698                 {
11699                         struct mlx5_flow_dv_port_id_action_resource
11700                                         port_id_resource;
11701                         uint32_t port_id = 0;
11702
11703                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11704                         /* Save the port id resource before sample */
11705                         pre_rix = dev_flow->handle->rix_port_id_action;
11706                         pre_r = dev_flow->dv.port_id_action;
11707                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11708                                                              &port_id, error))
11709                                 return -rte_errno;
11710                         port_id_resource.port_id = port_id;
11711                         if (flow_dv_port_id_action_resource_register
11712                             (dev, &port_id_resource, dev_flow, error))
11713                                 return -rte_errno;
11714                         sample_act->dr_port_id_action =
11715                                 dev_flow->dv.port_id_action->action;
11716                         sample_idx->rix_port_id_action =
11717                                 dev_flow->handle->rix_port_id_action;
11718                         sample_actions[sample_act->actions_num++] =
11719                                                 sample_act->dr_port_id_action;
11720                         /* Recover the port id resource after sample */
11721                         dev_flow->dv.port_id_action = pre_r;
11722                         dev_flow->handle->rix_port_id_action = pre_rix;
11723                         (*num_of_dest)++;
11724                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11725                         break;
11726                 }
11727                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11728                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11729                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11730                         /* Save the encap resource before sample */
11731                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11732                         pre_r = dev_flow->dv.encap_decap;
11733                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11734                                                            dev_flow,
11735                                                            attr->transfer,
11736                                                            error))
11737                                 return -rte_errno;
11738                         sample_act->dr_encap_action =
11739                                 dev_flow->dv.encap_decap->action;
11740                         sample_idx->rix_encap_decap =
11741                                 dev_flow->handle->dvh.rix_encap_decap;
11742                         sample_actions[sample_act->actions_num++] =
11743                                                 sample_act->dr_encap_action;
11744                         /* Recover the encap resource after sample */
11745                         dev_flow->dv.encap_decap = pre_r;
11746                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11747                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11748                         break;
11749                 default:
11750                         return rte_flow_error_set(error, EINVAL,
11751                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11752                                 NULL,
11753                                 "unsupported action type in sampler");
11754                 }
11755         }
11756         sample_act->action_flags = action_flags;
11757         res->ft_id = dev_flow->dv.group;
11758         if (attr->transfer) {
11759                 union {
11760                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11761                         uint64_t set_action;
11762                 } action_ctx = { .set_action = 0 };
11763
11764                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11765                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11766                          MLX5_MODIFICATION_TYPE_SET);
11767                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11768                          MLX5_MODI_META_REG_C_0);
11769                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11770                          priv->vport_meta_tag);
11771                 res->set_action = action_ctx.set_action;
11772         } else if (attr->ingress) {
11773                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11774         } else {
11775                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11776         }
11777         return 0;
11778 }
11779
11780 /**
11781  * Convert Sample action to DV specification.
11782  *
11783  * @param[in] dev
11784  *   Pointer to rte_eth_dev structure.
11785  * @param[in, out] dev_flow
11786  *   Pointer to the mlx5_flow.
11787  * @param[in] num_of_dest
11788  *   The number of destinations.
11789  * @param[in, out] res
11790  *   Pointer to sample resource.
11791  * @param[in, out] mdest_res
11792  *   Pointer to destination array resource.
11793  * @param[in] sample_actions
11794  *   Pointer to sample path actions list.
11795  * @param[in] action_flags
11796  *   Holds the actions detected until now.
11797  * @param[out] error
11798  *   Pointer to the error structure.
11799  *
11800  * @return
11801  *   0 on success, a negative errno value otherwise and rte_errno is set.
11802  */
11803 static int
11804 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11805                              struct mlx5_flow *dev_flow,
11806                              uint32_t num_of_dest,
11807                              struct mlx5_flow_dv_sample_resource *res,
11808                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
11809                              void **sample_actions,
11810                              uint64_t action_flags,
11811                              struct rte_flow_error *error)
11812 {
11813         /* update normal path action resource into last index of array */
11814         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11815         struct mlx5_flow_sub_actions_list *sample_act =
11816                                         &mdest_res->sample_act[dest_index];
11817         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11818         struct mlx5_flow_rss_desc *rss_desc;
11819         uint32_t normal_idx = 0;
11820         struct mlx5_hrxq *hrxq;
11821         uint32_t hrxq_idx;
11822
11823         MLX5_ASSERT(wks);
11824         rss_desc = &wks->rss_desc;
11825         if (num_of_dest > 1) {
11826                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
11827                         /* Handle QP action for mirroring */
11828                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11829                                                     rss_desc, &hrxq_idx);
11830                         if (!hrxq)
11831                                 return rte_flow_error_set
11832                                      (error, rte_errno,
11833                                       RTE_FLOW_ERROR_TYPE_ACTION,
11834                                       NULL,
11835                                       "cannot create rx queue");
11836                         normal_idx++;
11837                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
11838                         sample_act->dr_queue_action = hrxq->action;
11839                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11840                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11841                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11842                 }
11843                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
11844                         normal_idx++;
11845                         mdest_res->sample_idx[dest_index].rix_encap_decap =
11846                                 dev_flow->handle->dvh.rix_encap_decap;
11847                         sample_act->dr_encap_action =
11848                                 dev_flow->dv.encap_decap->action;
11849                         dev_flow->handle->dvh.rix_encap_decap = 0;
11850                 }
11851                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
11852                         normal_idx++;
11853                         mdest_res->sample_idx[dest_index].rix_port_id_action =
11854                                 dev_flow->handle->rix_port_id_action;
11855                         sample_act->dr_port_id_action =
11856                                 dev_flow->dv.port_id_action->action;
11857                         dev_flow->handle->rix_port_id_action = 0;
11858                 }
11859                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
11860                         normal_idx++;
11861                         mdest_res->sample_idx[dest_index].rix_jump =
11862                                 dev_flow->handle->rix_jump;
11863                         sample_act->dr_jump_action =
11864                                 dev_flow->dv.jump->action;
11865                         dev_flow->handle->rix_jump = 0;
11866                 }
11867                 sample_act->actions_num = normal_idx;
11868                 /* update sample action resource into first index of array */
11869                 mdest_res->ft_type = res->ft_type;
11870                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
11871                                 sizeof(struct mlx5_flow_sub_actions_idx));
11872                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
11873                                 sizeof(struct mlx5_flow_sub_actions_list));
11874                 mdest_res->num_of_dest = num_of_dest;
11875                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
11876                                                          dev_flow, error))
11877                         return rte_flow_error_set(error, EINVAL,
11878                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11879                                                   NULL, "can't create sample "
11880                                                   "action");
11881         } else {
11882                 res->sub_actions = sample_actions;
11883                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
11884                         return rte_flow_error_set(error, EINVAL,
11885                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11886                                                   NULL,
11887                                                   "can't create sample action");
11888         }
11889         return 0;
11890 }
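/*
 * Editor's illustrative sketch (not part of the driver): the application-side
 * rte_flow actions that the two translation functions above consume. The
 * queue indexes and the sampling ratio are hypothetical values.
 */
static __rte_unused void
mlx5_doc_sample_action_sketch(void)
{
	struct rte_flow_action_queue mirror_queue = { .index = 1 };
	struct rte_flow_action sub_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &mirror_queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_action_sample sample_conf = {
		.ratio = 2,		/* Sample one of every two packets. */
		.actions = sub_actions,	/* Sample path sub-actions. */
	};
	struct rte_flow_action_queue fate_queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &fate_queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	(void)actions;	/* Would be passed to rte_flow_create(). */
}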
11891
11892 /**
11893  * Remove an ASO age action from age actions list.
11894  *
11895  * @param[in] dev
11896  *   Pointer to the Ethernet device structure.
11897  * @param[in] age
11898  *   Pointer to the aso age action handler.
11899  */
11900 static void
11901 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11902                                 struct mlx5_aso_age_action *age)
11903 {
11904         struct mlx5_age_info *age_info;
11905         struct mlx5_age_param *age_param = &age->age_params;
11906         struct mlx5_priv *priv = dev->data->dev_private;
11907         uint16_t expected = AGE_CANDIDATE;
11908
11909         age_info = GET_PORT_AGE_INFO(priv);
11910         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11911                                          AGE_FREE, false, __ATOMIC_RELAXED,
11912                                          __ATOMIC_RELAXED)) {
11913                 /*
11914                  * We need the lock even if it is an age timeout,
11915                  * since the age action may still be in process.
11916                  */
11917                 rte_spinlock_lock(&age_info->aged_sl);
11918                 LIST_REMOVE(age, next);
11919                 rte_spinlock_unlock(&age_info->aged_sl);
11920                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11921         }
11922 }
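/*
 * Editor's sketch of the lock-free fast path used above: a single CAS claims
 * the action while it is still a candidate, and the locked fallback handles
 * the case where the aging thread already moved it to the aged list. The
 * helper below is a generic, hypothetical illustration of that first step.
 */
static __rte_unused bool
mlx5_doc_cas_claim_sketch(uint16_t *state, uint16_t from, uint16_t to)
{
	uint16_t expected = from;

	/* Succeeds only if no one moved the state away from 'from'. */
	return __atomic_compare_exchange_n(state, &expected, to, false,
					   __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}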
11923
11924 /**
11925  * Release an ASO age action.
11926  *
11927  * @param[in] dev
11928  *   Pointer to the Ethernet device structure.
11929  * @param[in] age_idx
11930  *   Index of ASO age action to release.
11934  *
11935  * @return
11936  *   0 when age action was removed, otherwise the number of references.
11937  */
11938 static int
11939 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11940 {
11941         struct mlx5_priv *priv = dev->data->dev_private;
11942         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11943         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11944         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11945
11946         if (!ret) {
11947                 flow_dv_aso_age_remove_from_age(dev, age);
11948                 rte_spinlock_lock(&mng->free_sl);
11949                 LIST_INSERT_HEAD(&mng->free, age, next);
11950                 rte_spinlock_unlock(&mng->free_sl);
11951         }
11952         return ret;
11953 }
11954
11955 /**
11956  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11957  *
11958  * @param[in] dev
11959  *   Pointer to the Ethernet device structure.
11960  *
11961  * @return
11962  *   0 on success, otherwise negative errno value and rte_errno is set.
11963  */
11964 static int
11965 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11966 {
11967         struct mlx5_priv *priv = dev->data->dev_private;
11968         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11969         void *old_pools = mng->pools;
11970         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11971         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11972         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11973
11974         if (!pools) {
11975                 rte_errno = ENOMEM;
11976                 return -ENOMEM;
11977         }
11978         if (old_pools) {
11979                 memcpy(pools, old_pools,
11980                        mng->n * sizeof(struct mlx5_aso_age_pool *));
11981                 mlx5_free(old_pools);
11982         } else {
11983                 /* First ASO flow hit allocation - starting ASO data-path. */
11984                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11985
11986                 if (ret) {
11987                         mlx5_free(pools);
11988                         return ret;
11989                 }
11990         }
11991         mng->n = resize;
11992         mng->pools = pools;
11993         return 0;
11994 }
11995
11996 /**
11997  * Create and initialize a new ASO aging pool.
11998  *
11999  * @param[in] dev
12000  *   Pointer to the Ethernet device structure.
12001  * @param[out] age_free
12002  *   Where to put the pointer of a new age action.
12003  *
12004  * @return
12005  *   The age actions pool pointer and @p age_free is set on success,
12006  *   NULL otherwise and rte_errno is set.
12007  */
12008 static struct mlx5_aso_age_pool *
12009 flow_dv_age_pool_create(struct rte_eth_dev *dev,
12010                         struct mlx5_aso_age_action **age_free)
12011 {
12012         struct mlx5_priv *priv = dev->data->dev_private;
12013         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12014         struct mlx5_aso_age_pool *pool = NULL;
12015         struct mlx5_devx_obj *obj = NULL;
12016         uint32_t i;
12017
12018         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
12019                                                     priv->sh->cdev->pdn);
12020         if (!obj) {
12021                 rte_errno = ENODATA;
12022                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
12023                 return NULL;
12024         }
12025         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12026         if (!pool) {
12027                 claim_zero(mlx5_devx_cmd_destroy(obj));
12028                 rte_errno = ENOMEM;
12029                 return NULL;
12030         }
12031         pool->flow_hit_aso_obj = obj;
12032         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
12033         rte_rwlock_write_lock(&mng->resize_rwl);
12034         pool->index = mng->next;
12035         /* Resize pools array if there is no room for the new pool in it. */
12036         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
12037                 claim_zero(mlx5_devx_cmd_destroy(obj));
12038                 mlx5_free(pool);
12039                 rte_rwlock_write_unlock(&mng->resize_rwl);
12040                 return NULL;
12041         }
12042         mng->pools[pool->index] = pool;
12043         mng->next++;
12044         rte_rwlock_write_unlock(&mng->resize_rwl);
12045         /* Assign the first action in the new pool, the rest go to free list. */
12046         *age_free = &pool->actions[0];
12047         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
12048                 pool->actions[i].offset = i;
12049                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
12050         }
12051         return pool;
12052 }
12053
12054 /**
12055  * Allocate an ASO aging bit.
12056  *
12057  * @param[in] dev
12058  *   Pointer to the Ethernet device structure.
12059  * @param[out] error
12060  *   Pointer to the error structure.
12061  *
12062  * @return
12063  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
12064  */
12065 static uint32_t
12066 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12067 {
12068         struct mlx5_priv *priv = dev->data->dev_private;
12069         const struct mlx5_aso_age_pool *pool;
12070         struct mlx5_aso_age_action *age_free = NULL;
12071         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12072
12073         MLX5_ASSERT(mng);
12074         /* Try to get the next free age action bit. */
12075         rte_spinlock_lock(&mng->free_sl);
12076         age_free = LIST_FIRST(&mng->free);
12077         if (age_free) {
12078                 LIST_REMOVE(age_free, next);
12079         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
12080                 rte_spinlock_unlock(&mng->free_sl);
12081                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12082                                    NULL, "failed to create ASO age pool");
12083                 return 0; /* 0 is an error. */
12084         }
12085         rte_spinlock_unlock(&mng->free_sl);
12086         pool = container_of
12087           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
12088                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
12089                                                                        actions);
12090         if (!age_free->dr_action) {
12091                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
12092                                                  error);
12093
12094                 if (reg_c < 0) {
12095                         rte_flow_error_set(error, rte_errno,
12096                                            RTE_FLOW_ERROR_TYPE_ACTION,
12097                                            NULL, "failed to get reg_c "
12098                                            "for ASO flow hit");
12099                         return 0; /* 0 is an error. */
12100                 }
12101 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
12102                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
12103                                 (priv->sh->rx_domain,
12104                                  pool->flow_hit_aso_obj->obj, age_free->offset,
12105                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
12106                                  (reg_c - REG_C_0));
12107 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
12108                 if (!age_free->dr_action) {
12109                         rte_errno = errno;
12110                         rte_spinlock_lock(&mng->free_sl);
12111                         LIST_INSERT_HEAD(&mng->free, age_free, next);
12112                         rte_spinlock_unlock(&mng->free_sl);
12113                         rte_flow_error_set(error, rte_errno,
12114                                            RTE_FLOW_ERROR_TYPE_ACTION,
12115                                            NULL, "failed to create ASO "
12116                                            "flow hit action");
12117                         return 0; /* 0 is an error. */
12118                 }
12119         }
12120         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
12121         return pool->index | ((age_free->offset + 1) << 16);
12122 }
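/*
 * Editor's sketch: how the index returned above can be decoded. The driver
 * does the equivalent in flow_aso_age_get_by_idx(); the offset is stored
 * biased by one so that a zero index can mean "no action". The helper name
 * is hypothetical.
 */
static __rte_unused void
mlx5_doc_age_idx_decode_sketch(uint32_t age_idx,
			       uint16_t *pool_idx, uint16_t *offset)
{
	*pool_idx = (uint16_t)(age_idx & 0xffff);	/* Low 16 bits. */
	*offset = (uint16_t)(age_idx >> 16) - 1;	/* Remove the +1 bias. */
}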
12123
12124 /**
12125  * Initialize flow ASO age parameters.
12126  *
12127  * @param[in] dev
12128  *   Pointer to rte_eth_dev structure.
12129  * @param[in] age_idx
12130  *   Index of ASO age action.
12131  * @param[in] context
12132  *   Pointer to flow counter age context.
12133  * @param[in] timeout
12134  *   Aging timeout in seconds.
12135  *
12136  */
12137 static void
12138 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
12139                             uint32_t age_idx,
12140                             void *context,
12141                             uint32_t timeout)
12142 {
12143         struct mlx5_aso_age_action *aso_age;
12144
12145         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
12146         MLX5_ASSERT(aso_age);
12147         aso_age->age_params.context = context;
12148         aso_age->age_params.timeout = timeout;
12149         aso_age->age_params.port_id = dev->data->port_id;
12150         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
12151                          __ATOMIC_RELAXED);
12152         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
12153                          __ATOMIC_RELAXED);
12154 }
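/*
 * Editor's example (not part of the driver): the rte_flow AGE action whose
 * context and timeout end up in flow_dv_aso_age_params_init() above. The
 * timeout value and the context cookie are hypothetical.
 */
static __rte_unused void
mlx5_doc_age_action_sketch(void)
{
	static uint32_t app_cookie;	/* Reported back on aging events. */
	struct rte_flow_action_age age_conf = {
		.timeout = 10,		/* Age out after 10 idle seconds. */
		.context = &app_cookie,	/* Returned by rte_flow_get_aged_flows(). */
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	(void)actions;	/* Would be passed to rte_flow_create(). */
}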
12155
12156 static void
12157 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
12158                                const struct rte_flow_item_integrity *value,
12159                                void *headers_m, void *headers_v)
12160 {
12161         if (mask->l4_ok) {
12162                 /* RTE l4_ok filter aggregates hardware l4_ok and
12163                  * l4_checksum_ok filters.
12164                  * Positive RTE l4_ok match requires hardware match on both L4
12165                  * hardware integrity bits.
12166                  * For negative match, check hardware l4_checksum_ok bit only,
12167                  * because hardware sets that bit to 0 for all packets
12168                  * with bad L4.
12169                  */
12170                 if (value->l4_ok) {
12171                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1);
12172                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1);
12173                 }
12174                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12175                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12176                          !!value->l4_ok);
12177         }
12178         if (mask->l4_csum_ok) {
12179                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12180                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12181                          value->l4_csum_ok);
12182         }
12183 }
12184
12185 static void
12186 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
12187                                const struct rte_flow_item_integrity *value,
12188                                void *headers_m, void *headers_v, bool is_ipv4)
12189 {
12190         if (mask->l3_ok) {
12191                 /* RTE l3_ok filter aggregates for IPv4 hardware l3_ok and
12192                  * ipv4_csum_ok filters.
12193                  * Positive RTE l3_ok match requires hardware match on both L3
12194                  * hardware integrity bits.
12195                  * For negative match, check hardware l3_csum_ok bit only,
12196                  * because hardware sets that bit to 0 for all packets
12197                  * with bad L3.
12198                  */
12199                 if (is_ipv4) {
12200                         if (value->l3_ok) {
12201                                 MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12202                                          l3_ok, 1);
12203                                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12204                                          l3_ok, 1);
12205                         }
12206                         MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12207                                  ipv4_checksum_ok, 1);
12208                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12209                                  ipv4_checksum_ok, !!value->l3_ok);
12210                 } else {
12211                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1);
12212                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
12213                                  value->l3_ok);
12214                 }
12215         }
12216         if (mask->ipv4_csum_ok) {
12217                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1);
12218                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
12219                          value->ipv4_csum_ok);
12220         }
12221 }
12222
12223 static void
12224 set_integrity_bits(void *headers_m, void *headers_v,
12225                    const struct rte_flow_item *integrity_item, bool is_l3_ip4)
12226 {
12227         const struct rte_flow_item_integrity *spec = integrity_item->spec;
12228         const struct rte_flow_item_integrity *mask = integrity_item->mask;
12229
12230         /* Integrity item validation guarantees a non-NULL spec. */
12231         MLX5_ASSERT(spec != NULL);
12232         if (!mask)
12233                 mask = &rte_flow_item_integrity_mask;
12234         flow_dv_translate_integrity_l3(mask, spec, headers_m, headers_v,
12235                                        is_l3_ip4);
12236         flow_dv_translate_integrity_l4(mask, spec, headers_m, headers_v);
12237 }
12238
12239 static void
12240 flow_dv_translate_item_integrity_post(void *matcher, void *key,
12241                                       const
12242                                       struct rte_flow_item *integrity_items[2],
12243                                       uint64_t pattern_flags)
12244 {
12245         void *headers_m, *headers_v;
12246         bool is_l3_ip4;
12247
12248         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
12249                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12250                                          inner_headers);
12251                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
12252                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) !=
12253                             0;
12254                 set_integrity_bits(headers_m, headers_v,
12255                                    integrity_items[1], is_l3_ip4);
12256         }
12257         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
12258                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12259                                          outer_headers);
12260                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
12261                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) !=
12262                             0;
12263                 set_integrity_bits(headers_m, headers_v,
12264                                    integrity_items[0], is_l3_ip4);
12265         }
12266 }
12267
12268 static void
12269 flow_dv_translate_item_integrity(const struct rte_flow_item *item,
12270                                  const struct rte_flow_item *integrity_items[2],
12271                                  uint64_t *last_item)
12272 {
12273         const struct rte_flow_item_integrity *spec = (typeof(spec))item->spec;
12274
12275         /* Integrity item validation guarantees a non-NULL spec. */
12276         MLX5_ASSERT(spec != NULL);
12277         if (spec->level > 1) {
12278                 integrity_items[1] = item;
12279                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
12280         } else {
12281                 integrity_items[0] = item;
12282                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
12283         }
12284 }
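/*
 * Editor's example (not part of the driver): an application-side integrity
 * item as consumed by the translation helpers above. It requests a match on
 * packets whose outer L4 header and checksum are valid; the values are
 * illustrative.
 */
static __rte_unused void
mlx5_doc_integrity_item_sketch(void)
{
	struct rte_flow_item_integrity spec = {
		.level = 0,	/* Level 0/1: outermost headers, >1: inner. */
		.l4_ok = 1,	/* Requires both hardware L4 bits to match. */
	};
	struct rte_flow_item_integrity mask = {
		.l4_ok = 1,	/* Match on the l4_ok bit only. */
	};
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
		.spec = &spec,
		.mask = &mask,
	};

	(void)item;	/* Would be part of an rte_flow_create() pattern. */
}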
12285
12286 /**
12287  * Prepares DV flow counter with aging configuration.
12288  * Gets the counter by index when it exists, creates a new one otherwise.
12289  *
12290  * @param[in] dev
12291  *   Pointer to rte_eth_dev structure.
12292  * @param[in] dev_flow
12293  *   Pointer to the mlx5_flow.
12294  * @param[in, out] flow
12295  *   Pointer to the sub flow.
12296  *   Pointer to the parent flow.
12297  *   Pointer to the counter action configuration.
12298  * @param[in] age
12299  *   Pointer to the aging action configuration.
12300  * @param[out] error
12301  *   Pointer to the error structure.
12302  *
12303  * @return
12304  *   Pointer to the counter on success, NULL otherwise.
12305  */
12306 static struct mlx5_flow_counter *
12307 flow_dv_prepare_counter(struct rte_eth_dev *dev,
12308                         struct mlx5_flow *dev_flow,
12309                         struct rte_flow *flow,
12310                         const struct rte_flow_action_count *count,
12311                         const struct rte_flow_action_age *age,
12312                         struct rte_flow_error *error)
12313 {
12314         if (!flow->counter) {
12315                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
12316                                                                  count, age);
12317                 if (!flow->counter) {
12318                         rte_flow_error_set(error, rte_errno,
12319                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12320                                            "cannot create counter object.");
12321                         return NULL;
12322                 }
12323         }
12324         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12325 }
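/*
 * Editor's example (not part of the driver): reading back a counter such as
 * the one prepared above, through the public query API. The flow handle is
 * assumed to have been created with a COUNT action.
 */
static __rte_unused int
mlx5_doc_counter_query_sketch(uint16_t port_id, struct rte_flow *flow,
			      struct rte_flow_error *error)
{
	struct rte_flow_query_count stats = { .reset = 0 };
	struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};

	/* On success, stats.hits/stats.bytes hold the accumulated values. */
	return rte_flow_query(port_id, flow, &count_action, &stats, error);
}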
12326
12327 /*
12328  * Release an ASO CT action by its own device.
12329  *
12330  * @param[in] dev
12331  *   Pointer to the Ethernet device structure.
12332  * @param[in] idx
12333  *   Index of ASO CT action to release.
12334  *
12335  * @return
12336  *   0 when the CT action was removed, -1 when it is still busy in the
12336  *   ASO SQ, otherwise the number of remaining references.
12337  */
12338 static inline int
12339 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
12340 {
12341         struct mlx5_priv *priv = dev->data->dev_private;
12342         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12343         uint32_t ret;
12344         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12345         enum mlx5_aso_ct_state state =
12346                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
12347
12348         /* Cannot release when CT is in the ASO SQ. */
12349         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
12350                 return -1;
12351         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
12352         if (!ret) {
12353                 if (ct->dr_action_orig) {
12354 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12355                         claim_zero(mlx5_glue->destroy_flow_action
12356                                         (ct->dr_action_orig));
12357 #endif
12358                         ct->dr_action_orig = NULL;
12359                 }
12360                 if (ct->dr_action_rply) {
12361 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12362                         claim_zero(mlx5_glue->destroy_flow_action
12363                                         (ct->dr_action_rply));
12364 #endif
12365                         ct->dr_action_rply = NULL;
12366                 }
12367                 /* Clear the state to free; not needed on the first allocation. */
12368                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
12369                 rte_spinlock_lock(&mng->ct_sl);
12370                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
12371                 rte_spinlock_unlock(&mng->ct_sl);
12372         }
12373         return (int)ret;
12374 }
12375
12376 static inline int
12377 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx,
12378                        struct rte_flow_error *error)
12379 {
12380         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12381         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12382         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12383         int ret;
12384
12385         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12386         if (dev->data->dev_started != 1)
12387                 return rte_flow_error_set(error, EAGAIN,
12388                                           RTE_FLOW_ERROR_TYPE_ACTION,
12389                                           NULL,
12390                                           "Indirect CT action cannot be destroyed when the port is stopped");
12391         ret = flow_dv_aso_ct_dev_release(owndev, idx);
12392         if (ret < 0)
12393                 return rte_flow_error_set(error, EAGAIN,
12394                                           RTE_FLOW_ERROR_TYPE_ACTION,
12395                                           NULL,
12396                                           "Current state prevents indirect CT action from being destroyed");
12397         return ret;
12398 }
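/*
 * Editor's example (not part of the driver): the application-side call that
 * eventually reaches the CT release helpers above. The handle is assumed to
 * come from an earlier rte_flow_action_handle_create().
 */
static __rte_unused int
mlx5_doc_ct_handle_destroy_sketch(uint16_t port_id,
				  struct rte_flow_action_handle *handle,
				  struct rte_flow_error *error)
{
	/* Fails with EAGAIN if the port is stopped or the CT is still busy. */
	return rte_flow_action_handle_destroy(port_id, handle, error);
}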
12399
12400 /*
12401  * Resize the ASO CT pools array by 64 pools.
12402  *
12403  * @param[in] dev
12404  *   Pointer to the Ethernet device structure.
12405  *
12406  * @return
12407  *   0 on success, otherwise negative errno value and rte_errno is set.
12408  */
12409 static int
12410 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12411 {
12412         struct mlx5_priv *priv = dev->data->dev_private;
12413         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12414         void *old_pools = mng->pools;
12415         /* Magic number for now, needs a macro. */
12416         uint32_t resize = mng->n + 64;
12417         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12418         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12419
12420         if (!pools) {
12421                 rte_errno = ENOMEM;
12422                 return -rte_errno;
12423         }
12424         rte_rwlock_write_lock(&mng->resize_rwl);
12425         /* The ASO SQ/QP was already initialized at startup. */
12426         if (old_pools) {
12427                 /* Realloc could be an alternative choice. */
12428                 rte_memcpy(pools, old_pools,
12429                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
12430                 mlx5_free(old_pools);
12431         }
12432         mng->n = resize;
12433         mng->pools = pools;
12434         rte_rwlock_write_unlock(&mng->resize_rwl);
12435         return 0;
12436 }
12437
12438 /*
12439  * Create and initialize a new ASO CT pool.
12440  *
12441  * @param[in] dev
12442  *   Pointer to the Ethernet device structure.
12443  * @param[out] ct_free
12444  *   Where to put the pointer of a new CT action.
12445  *
12446  * @return
12447  *   The CT actions pool pointer and @p ct_free is set on success,
12448  *   NULL otherwise and rte_errno is set.
12449  */
12450 static struct mlx5_aso_ct_pool *
12451 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
12452                        struct mlx5_aso_ct_action **ct_free)
12453 {
12454         struct mlx5_priv *priv = dev->data->dev_private;
12455         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12456         struct mlx5_aso_ct_pool *pool = NULL;
12457         struct mlx5_devx_obj *obj = NULL;
12458         uint32_t i;
12459         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
12460
12461         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
12462                                                           priv->sh->cdev->pdn,
12463                                                           log_obj_size);
12464         if (!obj) {
12465                 rte_errno = ENODATA;
12466                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
12467                 return NULL;
12468         }
12469         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12470         if (!pool) {
12471                 rte_errno = ENOMEM;
12472                 claim_zero(mlx5_devx_cmd_destroy(obj));
12473                 return NULL;
12474         }
12475         pool->devx_obj = obj;
12476         pool->index = mng->next;
12477         /* Resize pools array if there is no room for the new pool in it. */
12478         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
12479                 claim_zero(mlx5_devx_cmd_destroy(obj));
12480                 mlx5_free(pool);
12481                 return NULL;
12482         }
12483         mng->pools[pool->index] = pool;
12484         mng->next++;
12485         /* Assign the first action in the new pool, the rest go to free list. */
12486         *ct_free = &pool->actions[0];
12487         /* The lock is held by the caller; the list operation is safe here. */
12488         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
12489                 /* refcnt is 0 when allocating the memory. */
12490                 pool->actions[i].offset = i;
12491                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
12492         }
12493         return pool;
12494 }
12495
12496 /*
12497  * Allocate an ASO CT action from the free list.
12498  *
12499  * @param[in] dev
12500  *   Pointer to the Ethernet device structure.
12501  * @param[out] error
12502  *   Pointer to the error structure.
12503  *
12504  * @return
12505  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
12506  */
12507 static uint32_t
12508 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12509 {
12510         struct mlx5_priv *priv = dev->data->dev_private;
12511         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12512         struct mlx5_aso_ct_action *ct = NULL;
12513         struct mlx5_aso_ct_pool *pool;
12514         uint8_t reg_c;
12515         uint32_t ct_idx;
12516
12517         MLX5_ASSERT(mng);
12518         if (!priv->sh->devx) {
12519                 rte_errno = ENOTSUP;
12520                 return 0;
12521         }
12522         /* Get a free CT action, if no, a new pool will be created. */
12523         rte_spinlock_lock(&mng->ct_sl);
12524         ct = LIST_FIRST(&mng->free_cts);
12525         if (ct) {
12526                 LIST_REMOVE(ct, next);
12527         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
12528                 rte_spinlock_unlock(&mng->ct_sl);
12529                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12530                                    NULL, "failed to create ASO CT pool");
12531                 return 0;
12532         }
12533         rte_spinlock_unlock(&mng->ct_sl);
12534         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
12535         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
12536         /* 0: inactive, 1: created, 2+: used by flows. */
12537         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
12538         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
12539         if (!ct->dr_action_orig) {
12540 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12541                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
12542                         (priv->sh->rx_domain, pool->devx_obj->obj,
12543                          ct->offset,
12544                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
12545                          reg_c - REG_C_0);
12546 #else
12547                 RTE_SET_USED(reg_c);
12548 #endif
12549                 if (!ct->dr_action_orig) {
12550                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12551                         rte_flow_error_set(error, rte_errno,
12552                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12553                                            "failed to create ASO CT action");
12554                         return 0;
12555                 }
12556         }
12557         if (!ct->dr_action_rply) {
12558 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12559                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
12560                         (priv->sh->rx_domain, pool->devx_obj->obj,
12561                          ct->offset,
12562                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
12563                          reg_c - REG_C_0);
12564 #endif
12565                 if (!ct->dr_action_rply) {
12566                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12567                         rte_flow_error_set(error, rte_errno,
12568                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12569                                            "failed to create ASO CT action");
12570                         return 0;
12571                 }
12572         }
12573         return ct_idx;
12574 }
12575
12576 /*
12577  * Create a conntrack object with context and actions by using ASO mechanism.
12578  *
12579  * @param[in] dev
12580  *   Pointer to rte_eth_dev structure.
12581  * @param[in] pro
12582  *   Pointer to conntrack information profile.
12583  * @param[out] error
12584  *   Pointer to the error structure.
12585  *
12586  * @return
12587  *   Index to conntrack object on success, 0 otherwise.
12588  */
12589 static uint32_t
12590 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12591                                    const struct rte_flow_action_conntrack *pro,
12592                                    struct rte_flow_error *error)
12593 {
12594         struct mlx5_priv *priv = dev->data->dev_private;
12595         struct mlx5_dev_ctx_shared *sh = priv->sh;
12596         struct mlx5_aso_ct_action *ct;
12597         uint32_t idx;
12598
12599         if (!sh->ct_aso_en)
12600                 return rte_flow_error_set(error, ENOTSUP,
12601                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12602                                           "Connection tracking is not supported");
12603         idx = flow_dv_aso_ct_alloc(dev, error);
12604         if (!idx)
12605                 return rte_flow_error_set(error, rte_errno,
12606                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12607                                           "Failed to allocate CT object");
12608         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12609         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12610                 return rte_flow_error_set(error, EBUSY,
12611                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12612                                           "Failed to update CT");
12613         ct->is_original = !!pro->is_original_dir;
12614         ct->peer = pro->peer_port;
12615         return idx;
12616 }
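/*
 * Editor's example (not part of the driver): creating a conntrack object
 * like the one above through the public indirect-action API. Only a few
 * profile fields are shown; a complete profile also carries the TCP
 * window/sequence parameters for both directions. Values are hypothetical.
 */
static __rte_unused struct rte_flow_action_handle *
mlx5_doc_conntrack_create_sketch(uint16_t port_id,
				 struct rte_flow_error *error)
{
	struct rte_flow_action_conntrack profile = {
		.peer_port = port_id,	/* Single-port example. */
		.is_original_dir = 1,
		.enable = 1,
		.state = RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
	};
	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
	struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_CONNTRACK,
		.conf = &profile,
	};

	return rte_flow_action_handle_create(port_id, &conf, &action, error);
}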
12617
12618 /**
12619  * Fill the flow with DV spec, lock free
12620  * (mutex should be acquired by caller).
12621  *
12622  * @param[in] dev
12623  *   Pointer to rte_eth_dev structure.
12624  * @param[in, out] dev_flow
12625  *   Pointer to the sub flow.
12626  * @param[in] attr
12627  *   Pointer to the flow attributes.
12628  * @param[in] items
12629  *   Pointer to the list of items.
12630  * @param[in] actions
12631  *   Pointer to the list of actions.
12632  * @param[out] error
12633  *   Pointer to the error structure.
12634  *
12635  * @return
12636  *   0 on success, a negative errno value otherwise and rte_errno is set.
12637  */
12638 static int
12639 flow_dv_translate(struct rte_eth_dev *dev,
12640                   struct mlx5_flow *dev_flow,
12641                   const struct rte_flow_attr *attr,
12642                   const struct rte_flow_item items[],
12643                   const struct rte_flow_action actions[],
12644                   struct rte_flow_error *error)
12645 {
12646         struct mlx5_priv *priv = dev->data->dev_private;
12647         struct mlx5_dev_config *dev_conf = &priv->config;
12648         struct rte_flow *flow = dev_flow->flow;
12649         struct mlx5_flow_handle *handle = dev_flow->handle;
12650         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12651         struct mlx5_flow_rss_desc *rss_desc;
12652         uint64_t item_flags = 0;
12653         uint64_t last_item = 0;
12654         uint64_t action_flags = 0;
12655         struct mlx5_flow_dv_matcher matcher = {
12656                 .mask = {
12657                         .size = sizeof(matcher.mask.buf),
12658                 },
12659         };
12660         int actions_n = 0;
12661         bool actions_end = false;
12662         union {
12663                 struct mlx5_flow_dv_modify_hdr_resource res;
12664                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12665                             sizeof(struct mlx5_modification_cmd) *
12666                             (MLX5_MAX_MODIFY_NUM + 1)];
12667         } mhdr_dummy;
12668         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12669         const struct rte_flow_action_count *count = NULL;
12670         const struct rte_flow_action_age *non_shared_age = NULL;
12671         union flow_dv_attr flow_attr = { .attr = 0 };
12672         uint32_t tag_be;
12673         union mlx5_flow_tbl_key tbl_key;
12674         uint32_t modify_action_position = UINT32_MAX;
12675         void *match_mask = matcher.mask.buf;
12676         void *match_value = dev_flow->dv.value.buf;
12677         uint8_t next_protocol = 0xff;
12678         struct rte_vlan_hdr vlan = { 0 };
12679         struct mlx5_flow_dv_dest_array_resource mdest_res;
12680         struct mlx5_flow_dv_sample_resource sample_res;
12681         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12682         const struct rte_flow_action_sample *sample = NULL;
12683         struct mlx5_flow_sub_actions_list *sample_act;
12684         uint32_t sample_act_pos = UINT32_MAX;
12685         uint32_t age_act_pos = UINT32_MAX;
12686         uint32_t num_of_dest = 0;
12687         int tmp_actions_n = 0;
12688         uint32_t table;
12689         int ret = 0;
12690         const struct mlx5_flow_tunnel *tunnel = NULL;
12691         struct flow_grp_info grp_info = {
12692                 .external = !!dev_flow->external,
12693                 .transfer = !!attr->transfer,
12694                 .fdb_def_rule = !!priv->fdb_def_rule,
12695                 .skip_scale = dev_flow->skip_scale &
12696                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12697                 .std_tbl_fix = true,
12698         };
12699         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
12700         const struct rte_flow_item *tunnel_item = NULL;
12701
12702         if (!wks)
12703                 return rte_flow_error_set(error, ENOMEM,
12704                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12705                                           NULL,
12706                                           "failed to push flow workspace");
12707         rss_desc = &wks->rss_desc;
12708         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12709         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12710         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12711                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12712         /* update normal path action resource into last index of array */
12713         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12714         if (is_tunnel_offload_active(dev)) {
12715                 if (dev_flow->tunnel) {
12716                         RTE_VERIFY(dev_flow->tof_type ==
12717                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12718                         tunnel = dev_flow->tunnel;
12719                 } else {
12720                         tunnel = mlx5_get_tof(items, actions,
12721                                               &dev_flow->tof_type);
12722                         dev_flow->tunnel = tunnel;
12723                 }
12724                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12725                                         (dev, attr, tunnel, dev_flow->tof_type);
12726         }
12729         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12730                                        &grp_info, error);
12731         if (ret)
12732                 return ret;
12733         dev_flow->dv.group = table;
12734         if (attr->transfer)
12735                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12736         /* number of actions must be set to 0 in case of dirty stack. */
12737         mhdr_res->actions_num = 0;
12738         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12739                 /*
12740                  * Do not add a decap action if the match rule drops the
12741                  * packet: HW rejects rules combining decap & drop.
12742                  *
12743                  * If a tunnel match rule was inserted before the matching
12744                  * tunnel set rule, the flow table used in the match rule
12745                  * must be registered. The current implementation handles
12746                  * that in flow_dv_match_register() at the function end.
12747                  */
12748                 bool add_decap = true;
12749                 const struct rte_flow_action *ptr = actions;
12750
12751                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12752                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12753                                 add_decap = false;
12754                                 break;
12755                         }
12756                 }
12757                 if (add_decap) {
12758                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12759                                                            attr->transfer,
12760                                                            error))
12761                                 return -rte_errno;
12762                         dev_flow->dv.actions[actions_n++] =
12763                                         dev_flow->dv.encap_decap->action;
12764                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12765                 }
12766         }
12767         for (; !actions_end ; actions++) {
12768                 const struct rte_flow_action_queue *queue;
12769                 const struct rte_flow_action_rss *rss;
12770                 const struct rte_flow_action *action = actions;
12771                 const uint8_t *rss_key;
12772                 struct mlx5_flow_tbl_resource *tbl;
12773                 struct mlx5_aso_age_action *age_act;
12774                 struct mlx5_flow_counter *cnt_act;
12775                 uint32_t port_id = 0;
12776                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12777                 int action_type = actions->type;
12778                 const struct rte_flow_action *found_action = NULL;
12779                 uint32_t jump_group = 0;
12780                 uint32_t owner_idx;
12781                 struct mlx5_aso_ct_action *ct;
12782
12783                 if (!mlx5_flow_os_action_supported(action_type))
12784                         return rte_flow_error_set(error, ENOTSUP,
12785                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12786                                                   actions,
12787                                                   "action not supported");
12788                 switch (action_type) {
12789                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12790                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12791                         break;
12792                 case RTE_FLOW_ACTION_TYPE_VOID:
12793                         break;
12794                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12795                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
12796                         if (flow_dv_translate_action_port_id(dev, action,
12797                                                              &port_id, error))
12798                                 return -rte_errno;
12799                         port_id_resource.port_id = port_id;
12800                         MLX5_ASSERT(!handle->rix_port_id_action);
12801                         if (flow_dv_port_id_action_resource_register
12802                             (dev, &port_id_resource, dev_flow, error))
12803                                 return -rte_errno;
12804                         dev_flow->dv.actions[actions_n++] =
12805                                         dev_flow->dv.port_id_action->action;
12806                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12807                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12808                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12809                         num_of_dest++;
12810                         break;
12811                 case RTE_FLOW_ACTION_TYPE_FLAG:
12812                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12813                         wks->mark = 1;
12814                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12815                                 struct rte_flow_action_mark mark = {
12816                                         .id = MLX5_FLOW_MARK_DEFAULT,
12817                                 };
12818
12819                                 if (flow_dv_convert_action_mark(dev, &mark,
12820                                                                 mhdr_res,
12821                                                                 error))
12822                                         return -rte_errno;
12823                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12824                                 break;
12825                         }
12826                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12827                         /*
12828                          * Only one FLAG or MARK is supported per device flow
12829                          * right now. So the pointer to the tag resource must be
12830                          * zero before the register process.
12831                          */
12832                         MLX5_ASSERT(!handle->dvh.rix_tag);
12833                         if (flow_dv_tag_resource_register(dev, tag_be,
12834                                                           dev_flow, error))
12835                                 return -rte_errno;
12836                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12837                         dev_flow->dv.actions[actions_n++] =
12838                                         dev_flow->dv.tag_resource->action;
12839                         break;
12840                 case RTE_FLOW_ACTION_TYPE_MARK:
12841                         action_flags |= MLX5_FLOW_ACTION_MARK;
12842                         wks->mark = 1;
12843                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12844                                 const struct rte_flow_action_mark *mark =
12845                                         (const struct rte_flow_action_mark *)
12846                                                 actions->conf;
12847
12848                                 if (flow_dv_convert_action_mark(dev, mark,
12849                                                                 mhdr_res,
12850                                                                 error))
12851                                         return -rte_errno;
12852                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12853                                 break;
12854                         }
12855                         /* Fall-through */
12856                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12857                         /* Legacy (non-extensive) MARK action. */
12858                         tag_be = mlx5_flow_mark_set
12859                               (((const struct rte_flow_action_mark *)
12860                                (actions->conf))->id);
12861                         MLX5_ASSERT(!handle->dvh.rix_tag);
12862                         if (flow_dv_tag_resource_register(dev, tag_be,
12863                                                           dev_flow, error))
12864                                 return -rte_errno;
12865                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12866                         dev_flow->dv.actions[actions_n++] =
12867                                         dev_flow->dv.tag_resource->action;
12868                         break;
12869                 case RTE_FLOW_ACTION_TYPE_SET_META:
12870                         if (flow_dv_convert_action_set_meta
12871                                 (dev, mhdr_res, attr,
12872                                  (const struct rte_flow_action_set_meta *)
12873                                   actions->conf, error))
12874                                 return -rte_errno;
12875                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12876                         break;
12877                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12878                         if (flow_dv_convert_action_set_tag
12879                                 (dev, mhdr_res,
12880                                  (const struct rte_flow_action_set_tag *)
12881                                   actions->conf, error))
12882                                 return -rte_errno;
12883                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12884                         break;
12885                 case RTE_FLOW_ACTION_TYPE_DROP:
12886                         action_flags |= MLX5_FLOW_ACTION_DROP;
12887                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12888                         break;
12889                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12890                         queue = actions->conf;
12891                         rss_desc->queue_num = 1;
12892                         rss_desc->queue[0] = queue->index;
12893                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12894                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12895                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12896                         num_of_dest++;
12897                         break;
12898                 case RTE_FLOW_ACTION_TYPE_RSS:
12899                         rss = actions->conf;
12900                         memcpy(rss_desc->queue, rss->queue,
12901                                rss->queue_num * sizeof(uint16_t));
12902                         rss_desc->queue_num = rss->queue_num;
12903                         /* NULL RSS key indicates default RSS key. */
12904                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12905                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12906                         /*
12907                          * rss->level and rss->types should be set in advance
12908                          * when expanding items for RSS.
12909                          */
12910                         action_flags |= MLX5_FLOW_ACTION_RSS;
12911                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12912                                 MLX5_FLOW_FATE_SHARED_RSS :
12913                                 MLX5_FLOW_FATE_QUEUE;
12914                         break;
12915                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
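                        /* The indirect action index is encoded in the conf pointer. */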
12916                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12917                         age_act = flow_aso_age_get_by_idx(dev, owner_idx);
12918                         if (flow->age == 0) {
12919                                 flow->age = owner_idx;
12920                                 __atomic_fetch_add(&age_act->refcnt, 1,
12921                                                    __ATOMIC_RELAXED);
12922                         }
12923                         age_act_pos = actions_n++;
12924                         action_flags |= MLX5_FLOW_ACTION_AGE;
12925                         break;
12926                 case RTE_FLOW_ACTION_TYPE_AGE:
12927                         non_shared_age = action->conf;
12928                         age_act_pos = actions_n++;
12929                         action_flags |= MLX5_FLOW_ACTION_AGE;
12930                         break;
12931                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12932                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12933                         cnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,
12934                                                              NULL);
12935                         MLX5_ASSERT(cnt_act != NULL);
12936                         /*
12937                          * When creating a meter drop flow in the drop table,
12938                          * the counter should not overwrite the rte flow counter.
12939                          */
12940                         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
12941                             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
12942                                 dev_flow->dv.actions[actions_n++] =
12943                                                         cnt_act->action;
12944                         } else {
12945                                 if (flow->counter == 0) {
12946                                         flow->counter = owner_idx;
12947                                         __atomic_fetch_add
12948                                                 (&cnt_act->shared_info.refcnt,
12949                                                  1, __ATOMIC_RELAXED);
12950                                 }
12951                                 /* Save information first, will apply later. */
12952                                 action_flags |= MLX5_FLOW_ACTION_COUNT;
12953                         }
12954                         break;
12955                 case RTE_FLOW_ACTION_TYPE_COUNT:
12956                         if (!priv->sh->devx) {
12957                                 return rte_flow_error_set
12958                                               (error, ENOTSUP,
12959                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12960                                                NULL,
12961                                                "count action not supported");
12962                         }
12963                         /* Save information first, will apply later. */
12964                         count = action->conf;
12965                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12966                         break;
12967                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12968                         dev_flow->dv.actions[actions_n++] =
12969                                                 priv->sh->pop_vlan_action;
12970                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12971                         break;
12972                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
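                        /* Merge any following SET_VLAN_VID/PCP actions into the pushed header. */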
12973                         if (!(action_flags &
12974                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12975                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12976                         vlan.eth_proto = rte_be_to_cpu_16
12977                              ((((const struct rte_flow_action_of_push_vlan *)
12978                                                    actions->conf)->ethertype));
12979                         found_action = mlx5_flow_find_action
12980                                         (actions + 1,
12981                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12982                         if (found_action)
12983                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12984                         found_action = mlx5_flow_find_action
12985                                         (actions + 1,
12986                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12987                         if (found_action)
12988                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12989                         if (flow_dv_create_action_push_vlan
12990                                             (dev, attr, &vlan, dev_flow, error))
12991                                 return -rte_errno;
12992                         dev_flow->dv.actions[actions_n++] =
12993                                         dev_flow->dv.push_vlan_res->action;
12994                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12995                         break;
12996                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12997                         /* The OF_PUSH_VLAN action has already handled this action. */
12998                         MLX5_ASSERT(action_flags &
12999                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
13000                         break;
13001                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
13002                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
13003                                 break;
13004                         flow_dev_get_vlan_info_from_items(items, &vlan);
13005                         mlx5_update_vlan_vid_pcp(actions, &vlan);
13006                         /* If no VLAN push - this is a modify header action */
13007                         if (flow_dv_convert_action_modify_vlan_vid
13008                                                 (mhdr_res, actions, error))
13009                                 return -rte_errno;
13010                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
13011                         break;
13012                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
13013                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
13014                         if (flow_dv_create_action_l2_encap(dev, actions,
13015                                                            dev_flow,
13016                                                            attr->transfer,
13017                                                            error))
13018                                 return -rte_errno;
13019                         dev_flow->dv.actions[actions_n++] =
13020                                         dev_flow->dv.encap_decap->action;
13021                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
13022                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13023                                 sample_act->action_flags |=
13024                                                         MLX5_FLOW_ACTION_ENCAP;
13025                         break;
13026                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
13027                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
13028                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
13029                                                            attr->transfer,
13030                                                            error))
13031                                 return -rte_errno;
13032                         dev_flow->dv.actions[actions_n++] =
13033                                         dev_flow->dv.encap_decap->action;
13034                         action_flags |= MLX5_FLOW_ACTION_DECAP;
13035                         break;
13036                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
13037                         /* Handle encap with preceding decap. */
13038                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
13039                                 if (flow_dv_create_action_raw_encap
13040                                         (dev, actions, dev_flow, attr, error))
13041                                         return -rte_errno;
13042                                 dev_flow->dv.actions[actions_n++] =
13043                                         dev_flow->dv.encap_decap->action;
13044                         } else {
13045                                 /* Handle encap without preceding decap. */
13046                                 if (flow_dv_create_action_l2_encap
13047                                     (dev, actions, dev_flow, attr->transfer,
13048                                      error))
13049                                         return -rte_errno;
13050                                 dev_flow->dv.actions[actions_n++] =
13051                                         dev_flow->dv.encap_decap->action;
13052                         }
13053                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
13054                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13055                                 sample_act->action_flags |=
13056                                                         MLX5_FLOW_ACTION_ENCAP;
13057                         break;
13058                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
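                        /* Skip VOID actions to see whether a RAW_ENCAP follows the decap. */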
13059                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
13060                                 ;
13061                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
13062                                 if (flow_dv_create_action_l2_decap
13063                                     (dev, dev_flow, attr->transfer, error))
13064                                         return -rte_errno;
13065                                 dev_flow->dv.actions[actions_n++] =
13066                                         dev_flow->dv.encap_decap->action;
13067                         }
13068                         /* If decap is followed by encap, handle it at encap. */
13069                         action_flags |= MLX5_FLOW_ACTION_DECAP;
13070                         break;
13071                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
13072                         dev_flow->dv.actions[actions_n++] =
13073                                 (void *)(uintptr_t)action->conf;
13074                         action_flags |= MLX5_FLOW_ACTION_JUMP;
13075                         break;
13076                 case RTE_FLOW_ACTION_TYPE_JUMP:
13077                         jump_group = ((const struct rte_flow_action_jump *)
13078                                                         action->conf)->group;
13079                         grp_info.std_tbl_fix = 0;
13080                         if (dev_flow->skip_scale &
13081                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
13082                                 grp_info.skip_scale = 1;
13083                         else
13084                                 grp_info.skip_scale = 0;
13085                         ret = mlx5_flow_group_to_table(dev, tunnel,
13086                                                        jump_group,
13087                                                        &table,
13088                                                        &grp_info, error);
13089                         if (ret)
13090                                 return ret;
13091                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
13092                                                        attr->transfer,
13093                                                        !!dev_flow->external,
13094                                                        tunnel, jump_group, 0,
13095                                                        0, error);
13096                         if (!tbl)
13097                                 return rte_flow_error_set
13098                                                 (error, errno,
13099                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13100                                                  NULL,
13101                                                  "cannot create jump action.");
13102                         if (flow_dv_jump_tbl_resource_register
13103                             (dev, tbl, dev_flow, error)) {
13104                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
13105                                 return rte_flow_error_set
13106                                                 (error, errno,
13107                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13108                                                  NULL,
13109                                                  "cannot create jump action.");
13110                         }
13111                         dev_flow->dv.actions[actions_n++] =
13112                                         dev_flow->dv.jump->action;
13113                         action_flags |= MLX5_FLOW_ACTION_JUMP;
13114                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
13115                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
13116                         num_of_dest++;
13117                         break;
13118                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
13119                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
13120                         if (flow_dv_convert_action_modify_mac
13121                                         (mhdr_res, actions, error))
13122                                 return -rte_errno;
13123                         action_flags |= actions->type ==
13124                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
13125                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
13126                                         MLX5_FLOW_ACTION_SET_MAC_DST;
13127                         break;
13128                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
13129                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
13130                         if (flow_dv_convert_action_modify_ipv4
13131                                         (mhdr_res, actions, error))
13132                                 return -rte_errno;
13133                         action_flags |= actions->type ==
13134                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
13135                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
13136                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
13137                         break;
13138                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
13139                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
13140                         if (flow_dv_convert_action_modify_ipv6
13141                                         (mhdr_res, actions, error))
13142                                 return -rte_errno;
13143                         action_flags |= actions->type ==
13144                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
13145                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
13146                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
13147                         break;
13148                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
13149                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
13150                         if (flow_dv_convert_action_modify_tp
13151                                         (mhdr_res, actions, items,
13152                                          &flow_attr, dev_flow, !!(action_flags &
13153                                          MLX5_FLOW_ACTION_DECAP), error))
13154                                 return -rte_errno;
13155                         action_flags |= actions->type ==
13156                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
13157                                         MLX5_FLOW_ACTION_SET_TP_SRC :
13158                                         MLX5_FLOW_ACTION_SET_TP_DST;
13159                         break;
13160                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
13161                         if (flow_dv_convert_action_modify_dec_ttl
13162                                         (mhdr_res, items, &flow_attr, dev_flow,
13163                                          !!(action_flags &
13164                                          MLX5_FLOW_ACTION_DECAP), error))
13165                                 return -rte_errno;
13166                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
13167                         break;
13168                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
13169                         if (flow_dv_convert_action_modify_ttl
13170                                         (mhdr_res, actions, items, &flow_attr,
13171                                          dev_flow, !!(action_flags &
13172                                          MLX5_FLOW_ACTION_DECAP), error))
13173                                 return -rte_errno;
13174                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
13175                         break;
13176                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
13177                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
13178                         if (flow_dv_convert_action_modify_tcp_seq
13179                                         (mhdr_res, actions, error))
13180                                 return -rte_errno;
13181                         action_flags |= actions->type ==
13182                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
13183                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
13184                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
13185                         break;
13187                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
13188                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
13189                         if (flow_dv_convert_action_modify_tcp_ack
13190                                         (mhdr_res, actions, error))
13191                                 return -rte_errno;
13192                         action_flags |= actions->type ==
13193                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
13194                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
13195                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
13196                         break;
13197                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
13198                         if (flow_dv_convert_action_set_reg
13199                                         (mhdr_res, actions, error))
13200                                 return -rte_errno;
13201                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13202                         break;
13203                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
13204                         if (flow_dv_convert_action_copy_mreg
13205                                         (dev, mhdr_res, actions, error))
13206                                 return -rte_errno;
13207                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13208                         break;
13209                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
13210                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
13211                         dev_flow->handle->fate_action =
13212                                         MLX5_FLOW_FATE_DEFAULT_MISS;
13213                         break;
13214                 case RTE_FLOW_ACTION_TYPE_METER:
13215                         if (!wks->fm)
13216                                 return rte_flow_error_set(error, rte_errno,
13217                                         RTE_FLOW_ERROR_TYPE_ACTION,
13218                                         NULL, "Failed to get meter in flow.");
13219                         /* Set the meter action. */
13220                         dev_flow->dv.actions[actions_n++] =
13221                                 wks->fm->meter_action;
13222                         action_flags |= MLX5_FLOW_ACTION_METER;
13223                         break;
13224                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
13225                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
13226                                                               actions, error))
13227                                 return -rte_errno;
13228                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
13229                         break;
13230                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
13231                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
13232                                                               actions, error))
13233                                 return -rte_errno;
13234                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
13235                         break;
13236                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
13237                         sample_act_pos = actions_n;
13238                         sample = (const struct rte_flow_action_sample *)
13239                                  action->conf;
13240                         actions_n++;
13241                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
13242                         /* Put the encap action into the group when used together with a port_id action. */
13243                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
13244                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
13245                                 sample_act->action_flags |=
13246                                                         MLX5_FLOW_ACTION_ENCAP;
13247                         break;
13248                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13249                         if (flow_dv_convert_action_modify_field
13250                                         (dev, mhdr_res, actions, attr, error))
13251                                 return -rte_errno;
13252                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
13253                         break;
13254                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
13255                         owner_idx = (uint32_t)(uintptr_t)action->conf;
13256                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
13257                         if (!ct)
13258                                 return rte_flow_error_set(error, EINVAL,
13259                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13260                                                 NULL,
13261                                                 "Failed to get CT object.");
13262                         if (mlx5_aso_ct_available(priv->sh, ct))
13263                                 return rte_flow_error_set(error, rte_errno,
13264                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13265                                                 NULL,
13266                                                 "CT is unavailable.");
13267                         if (ct->is_original)
13268                                 dev_flow->dv.actions[actions_n] =
13269                                                         ct->dr_action_orig;
13270                         else
13271                                 dev_flow->dv.actions[actions_n] =
13272                                                         ct->dr_action_rply;
13273                         if (flow->ct == 0) {
13274                                 flow->indirect_type =
13275                                                 MLX5_INDIRECT_ACTION_TYPE_CT;
13276                                 flow->ct = owner_idx;
13277                                 __atomic_fetch_add(&ct->refcnt, 1,
13278                                                    __ATOMIC_RELAXED);
13279                         }
13280                         actions_n++;
13281                         action_flags |= MLX5_FLOW_ACTION_CT;
13282                         break;
13283                 case RTE_FLOW_ACTION_TYPE_END:
13284                         actions_end = true;
13285                         if (mhdr_res->actions_num) {
13286                                 /* Create the modify header action if needed. */
13287                                 if (flow_dv_modify_hdr_resource_register
13288                                         (dev, mhdr_res, dev_flow, error))
13289                                         return -rte_errno;
13290                                 dev_flow->dv.actions[modify_action_position] =
13291                                         handle->dvh.modify_hdr->action;
13292                         }
13293                         /*
13294                          * Handle the AGE and COUNT actions with a single HW
13295                          * counter when they are not shared.
13296                          */
13297                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
13298                                 if ((non_shared_age && count) ||
13299                                     !(priv->sh->flow_hit_aso_en &&
13300                                       (attr->group || attr->transfer))) {
13301                                         /* Implement the AGE action via a HW counter. */
13302                                         cnt_act = flow_dv_prepare_counter
13303                                                                 (dev, dev_flow,
13304                                                                  flow, count,
13305                                                                  non_shared_age,
13306                                                                  error);
13307                                         if (!cnt_act)
13308                                                 return -rte_errno;
13309                                         dev_flow->dv.actions[age_act_pos] =
13310                                                                 cnt_act->action;
13311                                         break;
13312                                 }
13313                                 if (!flow->age && non_shared_age) {
13314                                         flow->age = flow_dv_aso_age_alloc
13315                                                                 (dev, error);
13316                                         if (!flow->age)
13317                                                 return -rte_errno;
13318                                         flow_dv_aso_age_params_init
13319                                                     (dev, flow->age,
13320                                                      non_shared_age->context ?
13321                                                      non_shared_age->context :
13322                                                      (void *)(uintptr_t)
13323                                                      (dev_flow->flow_idx),
13324                                                      non_shared_age->timeout);
13325                                 }
13326                                 age_act = flow_aso_age_get_by_idx(dev,
13327                                                                   flow->age);
13328                                 dev_flow->dv.actions[age_act_pos] =
13329                                                              age_act->dr_action;
13330                         }
13331                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13332                                 /*
13333                                  * Create one count action, to be used
13334                                  * by all sub-flows.
13335                                  */
13336                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
13337                                                                   flow, count,
13338                                                                   NULL, error);
13339                                 if (!cnt_act)
13340                                         return -rte_errno;
13341                                 dev_flow->dv.actions[actions_n++] =
13342                                                                 cnt_act->action;
13343                         }
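                        /* Fall-through */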
13344                 default:
13345                         break;
13346                 }
13347                 if (mhdr_res->actions_num &&
13348                     modify_action_position == UINT32_MAX)
13349                         modify_action_position = actions_n++;
13350         }
13351         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
13352                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
13353                 int item_type = items->type;
13354
13355                 if (!mlx5_flow_os_item_supported(item_type))
13356                         return rte_flow_error_set(error, ENOTSUP,
13357                                                   RTE_FLOW_ERROR_TYPE_ITEM,
13358                                                   NULL, "item not supported");
13359                 switch (item_type) {
13360                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
13361                         flow_dv_translate_item_port_id
13362                                 (dev, match_mask, match_value, items, attr);
13363                         last_item = MLX5_FLOW_ITEM_PORT_ID;
13364                         break;
13365                 case RTE_FLOW_ITEM_TYPE_ETH:
13366                         flow_dv_translate_item_eth(match_mask, match_value,
13367                                                    items, tunnel,
13368                                                    dev_flow->dv.group);
13369                         matcher.priority = action_flags &
13370                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
13371                                         !dev_flow->external ?
13372                                         MLX5_PRIORITY_MAP_L3 :
13373                                         MLX5_PRIORITY_MAP_L2;
13374                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13375                                              MLX5_FLOW_LAYER_OUTER_L2;
13376                         break;
13377                 case RTE_FLOW_ITEM_TYPE_VLAN:
13378                         flow_dv_translate_item_vlan(dev_flow,
13379                                                     match_mask, match_value,
13380                                                     items, tunnel,
13381                                                     dev_flow->dv.group);
13382                         matcher.priority = MLX5_PRIORITY_MAP_L2;
13383                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13384                                               MLX5_FLOW_LAYER_INNER_VLAN) :
13385                                              (MLX5_FLOW_LAYER_OUTER_L2 |
13386                                               MLX5_FLOW_LAYER_OUTER_VLAN);
13387                         break;
13388                 case RTE_FLOW_ITEM_TYPE_IPV4:
13389                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13390                                                   &item_flags, &tunnel);
13391                         flow_dv_translate_item_ipv4(match_mask, match_value,
13392                                                     items, tunnel,
13393                                                     dev_flow->dv.group);
13394                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13395                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13396                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
13397                         if (items->mask != NULL &&
13398                             ((const struct rte_flow_item_ipv4 *)
13399                              items->mask)->hdr.next_proto_id) {
13400                                 next_protocol =
13401                                         ((const struct rte_flow_item_ipv4 *)
13402                                          (items->spec))->hdr.next_proto_id;
13403                                 next_protocol &=
13404                                         ((const struct rte_flow_item_ipv4 *)
13405                                          (items->mask))->hdr.next_proto_id;
13406                         } else {
13407                                 /* Reset for inner layer. */
13408                                 next_protocol = 0xff;
13409                         }
13410                         break;
13411                 case RTE_FLOW_ITEM_TYPE_IPV6:
13412                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13413                                                   &item_flags, &tunnel);
13414                         flow_dv_translate_item_ipv6(match_mask, match_value,
13415                                                     items, tunnel,
13416                                                     dev_flow->dv.group);
13417                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13418                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13419                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13420                         if (items->mask != NULL &&
13421                             ((const struct rte_flow_item_ipv6 *)
13422                              items->mask)->hdr.proto) {
13423                                 next_protocol =
13424                                         ((const struct rte_flow_item_ipv6 *)
13425                                          items->spec)->hdr.proto;
13426                                 next_protocol &=
13427                                         ((const struct rte_flow_item_ipv6 *)
13428                                          items->mask)->hdr.proto;
13429                         } else {
13430                                 /* Reset for inner layer. */
13431                                 next_protocol = 0xff;
13432                         }
13433                         break;
13434                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13435                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
13436                                                              match_value,
13437                                                              items, tunnel);
13438                         last_item = tunnel ?
13439                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13440                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13441                         if (items->mask != NULL &&
13442                             ((const struct rte_flow_item_ipv6_frag_ext *)
13443                              items->mask)->hdr.next_header) {
13444                                 next_protocol =
13445                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13446                                  items->spec)->hdr.next_header;
13447                                 next_protocol &=
13448                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13449                                  items->mask)->hdr.next_header;
13450                         } else {
13451                                 /* Reset for inner layer. */
13452                                 next_protocol = 0xff;
13453                         }
13454                         break;
13455                 case RTE_FLOW_ITEM_TYPE_TCP:
13456                         flow_dv_translate_item_tcp(match_mask, match_value,
13457                                                    items, tunnel);
13458                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13459                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13460                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
13461                         break;
13462                 case RTE_FLOW_ITEM_TYPE_UDP:
13463                         flow_dv_translate_item_udp(match_mask, match_value,
13464                                                    items, tunnel);
13465                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13466                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13467                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
13468                         break;
13469                 case RTE_FLOW_ITEM_TYPE_GRE:
13470                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13471                         last_item = MLX5_FLOW_LAYER_GRE;
13472                         tunnel_item = items;
13473                         break;
13474                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13475                         flow_dv_translate_item_gre_key(match_mask,
13476                                                        match_value, items);
13477                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
13478                         break;
13479                 case RTE_FLOW_ITEM_TYPE_NVGRE:
13480                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13481                         last_item = MLX5_FLOW_LAYER_GRE;
13482                         tunnel_item = items;
13483                         break;
13484                 case RTE_FLOW_ITEM_TYPE_VXLAN:
13485                         flow_dv_translate_item_vxlan(dev, attr,
13486                                                      match_mask, match_value,
13487                                                      items, tunnel);
13488                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13489                         last_item = MLX5_FLOW_LAYER_VXLAN;
13490                         break;
13491                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13492                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13493                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13494                         tunnel_item = items;
13495                         break;
13496                 case RTE_FLOW_ITEM_TYPE_GENEVE:
13497                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13498                         last_item = MLX5_FLOW_LAYER_GENEVE;
13499                         tunnel_item = items;
13500                         break;
13501                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13502                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13503                                                           match_value,
13504                                                           items, error);
13505                         if (ret)
13506                                 return rte_flow_error_set(error, -ret,
13507                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13508                                         "cannot create GENEVE TLV option");
13509                         flow->geneve_tlv_option = 1;
13510                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13511                         break;
13512                 case RTE_FLOW_ITEM_TYPE_MPLS:
13513                         flow_dv_translate_item_mpls(match_mask, match_value,
13514                                                     items, last_item, tunnel);
13515                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13516                         last_item = MLX5_FLOW_LAYER_MPLS;
13517                         break;
13518                 case RTE_FLOW_ITEM_TYPE_MARK:
13519                         flow_dv_translate_item_mark(dev, match_mask,
13520                                                     match_value, items);
13521                         last_item = MLX5_FLOW_ITEM_MARK;
13522                         break;
13523                 case RTE_FLOW_ITEM_TYPE_META:
13524                         flow_dv_translate_item_meta(dev, match_mask,
13525                                                     match_value, attr, items);
13526                         last_item = MLX5_FLOW_ITEM_METADATA;
13527                         break;
13528                 case RTE_FLOW_ITEM_TYPE_ICMP:
13529                         flow_dv_translate_item_icmp(match_mask, match_value,
13530                                                     items, tunnel);
13531                         last_item = MLX5_FLOW_LAYER_ICMP;
13532                         break;
13533                 case RTE_FLOW_ITEM_TYPE_ICMP6:
13534                         flow_dv_translate_item_icmp6(match_mask, match_value,
13535                                                       items, tunnel);
13536                         last_item = MLX5_FLOW_LAYER_ICMP6;
13537                         break;
13538                 case RTE_FLOW_ITEM_TYPE_TAG:
13539                         flow_dv_translate_item_tag(dev, match_mask,
13540                                                    match_value, items);
13541                         last_item = MLX5_FLOW_ITEM_TAG;
13542                         break;
13543                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13544                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
13545                                                         match_value, items);
13546                         last_item = MLX5_FLOW_ITEM_TAG;
13547                         break;
13548                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13549                         flow_dv_translate_item_tx_queue(dev, match_mask,
13550                                                         match_value,
13551                                                         items);
13552                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13553                         break;
13554                 case RTE_FLOW_ITEM_TYPE_GTP:
13555                         flow_dv_translate_item_gtp(match_mask, match_value,
13556                                                    items, tunnel);
13557                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13558                         last_item = MLX5_FLOW_LAYER_GTP;
13559                         break;
13560                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13561                         ret = flow_dv_translate_item_gtp_psc(match_mask,
13562                                                           match_value,
13563                                                           items);
13564                         if (ret)
13565                                 return rte_flow_error_set(error, -ret,
13566                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13567                                         "cannot create GTP PSC item");
13568                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
13569                         break;
13570                 case RTE_FLOW_ITEM_TYPE_ECPRI:
13571                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
13572                                 /* Create the parser only on first use. */
13573                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
13574                                 if (ret)
13575                                         return rte_flow_error_set
13576                                                 (error, -ret,
13577                                                 RTE_FLOW_ERROR_TYPE_ITEM,
13578                                                 NULL,
13579                                                 "cannot create eCPRI parser");
13580                         }
13581                         flow_dv_translate_item_ecpri(dev, match_mask,
13582                                                      match_value, items,
13583                                                      last_item);
13584                         /* No other protocol should follow eCPRI layer. */
13585                         last_item = MLX5_FLOW_LAYER_ECPRI;
13586                         break;
13587                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
13588                         flow_dv_translate_item_integrity(items, integrity_items,
13589                                                          &last_item);
13590                         break;
13591                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
13592                         flow_dv_translate_item_aso_ct(dev, match_mask,
13593                                                       match_value, items);
13594                         break;
13595                 case RTE_FLOW_ITEM_TYPE_FLEX:
13596                         flow_dv_translate_item_flex(dev, match_mask,
13597                                                     match_value, items,
13598                                                     dev_flow, tunnel != 0);
13599                         last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
13600                                     MLX5_FLOW_ITEM_OUTER_FLEX;
13601                         break;
13602                 default:
13603                         break;
13604                 }
13605                 item_flags |= last_item;
13606         }
13607         /*
13608          * When E-Switch mode is enabled, there are two cases where the
13609          * source port must be set manually.
13610          * The first is a NIC steering rule, and the second is an E-Switch
13611          * rule where no port_id item was found. In both cases the source
13612          * port is set according to the current port in use.
13613          */
13614         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
13615             (priv->representor || priv->master)) {
13616                 if (flow_dv_translate_item_port_id(dev, match_mask,
13617                                                    match_value, NULL, attr))
13618                         return -rte_errno;
13619         }
13620         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
13621                 flow_dv_translate_item_integrity_post(match_mask, match_value,
13622                                                       integrity_items,
13623                                                       item_flags);
13624         }
13625         if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
13626                 flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
13627                                                  tunnel_item, item_flags);
13628         else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
13629                 flow_dv_translate_item_geneve(match_mask, match_value,
13630                                               tunnel_item, item_flags);
13631         else if (item_flags & MLX5_FLOW_LAYER_GRE) {
13632                 if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
13633                         flow_dv_translate_item_gre(match_mask, match_value,
13634                                                    tunnel_item, item_flags);
13635                 else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
13636                         flow_dv_translate_item_nvgre(match_mask, match_value,
13637                                                      tunnel_item, item_flags);
13638                 else
13639                         MLX5_ASSERT(false);
13640         }
13641 #ifdef RTE_LIBRTE_MLX5_DEBUG
13642         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13643                                               dev_flow->dv.value.buf));
13644 #endif
13645         /*
13646          * Layers may already be initialized from the prefix flow if this
13647          * dev_flow is the suffix flow.
13648          */
13649         handle->layers |= item_flags;
13650         if (action_flags & MLX5_FLOW_ACTION_RSS)
13651                 flow_dv_hashfields_set(dev_flow, rss_desc);
13652         /* If the sample action contains an RSS action, the Sample/Mirror
13653          * resource should be registered after the hash fields are updated.
13654          */
13655         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13656                 ret = flow_dv_translate_action_sample(dev,
13657                                                       sample,
13658                                                       dev_flow, attr,
13659                                                       &num_of_dest,
13660                                                       sample_actions,
13661                                                       &sample_res,
13662                                                       error);
13663                 if (ret < 0)
13664                         return ret;
13665                 ret = flow_dv_create_action_sample(dev,
13666                                                    dev_flow,
13667                                                    num_of_dest,
13668                                                    &sample_res,
13669                                                    &mdest_res,
13670                                                    sample_actions,
13671                                                    action_flags,
13672                                                    error);
13673                 if (ret < 0)
13674                         return rte_flow_error_set
13675                                                 (error, rte_errno,
13676                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13677                                                 NULL,
13678                                                 "cannot create sample action");
13679                 if (num_of_dest > 1) {
13680                         dev_flow->dv.actions[sample_act_pos] =
13681                         dev_flow->dv.dest_array_res->action;
13682                 } else {
13683                         dev_flow->dv.actions[sample_act_pos] =
13684                         dev_flow->dv.sample_res->verbs_action;
13685                 }
13686         }
13687         /*
13688          * For multiple destinations (sample action with ratio=1), the encap
13689          * action and the port_id action are combined into a group action,
13690          * so remove those original actions from the flow and use only the
13691          * sample action instead.
13692          */
13693         if (num_of_dest > 1 &&
13694             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13695                 int i;
13696                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13697
13698                 for (i = 0; i < actions_n; i++) {
13699                         if ((sample_act->dr_encap_action &&
13700                                 sample_act->dr_encap_action ==
13701                                 dev_flow->dv.actions[i]) ||
13702                                 (sample_act->dr_port_id_action &&
13703                                 sample_act->dr_port_id_action ==
13704                                 dev_flow->dv.actions[i]) ||
13705                                 (sample_act->dr_jump_action &&
13706                                 sample_act->dr_jump_action ==
13707                                 dev_flow->dv.actions[i]))
13708                                 continue;
13709                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13710                 }
13711                 memcpy((void *)dev_flow->dv.actions,
13712                                 (void *)temp_actions,
13713                                 tmp_actions_n * sizeof(void *));
13714                 actions_n = tmp_actions_n;
13715         }
13716         dev_flow->dv.actions_n = actions_n;
13717         dev_flow->act_flags = action_flags;
13718         if (wks->skip_matcher_reg)
13719                 return 0;
13720         /* Register matcher. */
13721         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13722                                     matcher.mask.size);
13723         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13724                                                      matcher.priority,
13725                                                      dev_flow->external);
13726         /*
13727          * When creating a meter drop flow in the drop table with the
13728          * original 5-tuple match, the matcher priority should be lower
13729          * than the mtr_id matcher's priority.
13730          */
13731         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13732             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
13733             matcher.priority <= MLX5_REG_BITS)
13734                 matcher.priority += MLX5_REG_BITS;
13735         /* The reserved field does not need to be set to 0 here. */
13736         tbl_key.is_fdb = attr->transfer;
13737         tbl_key.is_egress = attr->egress;
13738         tbl_key.level = dev_flow->dv.group;
13739         tbl_key.id = dev_flow->dv.table_id;
13740         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13741                                      tunnel, attr->group, error))
13742                 return -rte_errno;
13743         return 0;
13744 }
13745
13746 /**
13747  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13748  *
13749  * @param[in, out] action
13750  *   Shared RSS action holding hash RX queue objects.
13751  * @param[in] hash_fields
13752  *   Defines combination of packet fields to participate in RX hash.
13753  * @param[in] hrxq_idx
13754  *   Hash RX queue index to set.
13755  *
13756  * @return
13757  *   0 on success, otherwise negative errno value.
13758  */
13762 static int
13763 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13764                               const uint64_t hash_fields,
13765                               uint32_t hrxq_idx)
13766 {
13767         uint32_t *hrxqs = action->hrxq;
13768
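        /*
         * Slot layout, as used by the cases below:
         *   [0] IPv4, [1] IPv4-TCP, [2] IPv4-UDP, [3] IPv6,
         *   [4] IPv6-TCP, [5] IPv6-UDP, [6] no hash (MLX5_RSS_HASH_NONE).
         */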
13769         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13770         case MLX5_RSS_HASH_IPV4:
13771                 /* fall-through. */
13772         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13773                 /* fall-through. */
13774         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13775                 hrxqs[0] = hrxq_idx;
13776                 return 0;
13777         case MLX5_RSS_HASH_IPV4_TCP:
13778                 /* fall-through. */
13779         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13780                 /* fall-through. */
13781         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13782                 hrxqs[1] = hrxq_idx;
13783                 return 0;
13784         case MLX5_RSS_HASH_IPV4_UDP:
13785                 /* fall-through. */
13786         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13787                 /* fall-through. */
13788         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13789                 hrxqs[2] = hrxq_idx;
13790                 return 0;
13791         case MLX5_RSS_HASH_IPV6:
13792                 /* fall-through. */
13793         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13794                 /* fall-through. */
13795         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13796                 hrxqs[3] = hrxq_idx;
13797                 return 0;
13798         case MLX5_RSS_HASH_IPV6_TCP:
13799                 /* fall-through. */
13800         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13801                 /* fall-through. */
13802         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13803                 hrxqs[4] = hrxq_idx;
13804                 return 0;
13805         case MLX5_RSS_HASH_IPV6_UDP:
13806                 /* fall-through. */
13807         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13808                 /* fall-through. */
13809         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13810                 hrxqs[5] = hrxq_idx;
13811                 return 0;
13812         case MLX5_RSS_HASH_NONE:
13813                 hrxqs[6] = hrxq_idx;
13814                 return 0;
13815         default:
13816                 return -1;
13817         }
13818 }
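
/*
 * Illustrative usage sketch (not part of the driver): how a caller could
 * populate the shared RSS slot table. The action object and index value
 * below are hypothetical.
 *
 *     struct mlx5_shared_action_rss rss_action = { .hrxq = { 0 } };
 *     uint32_t hrxq_idx = 42;
 *
 *     if (__flow_dv_action_rss_hrxq_set(&rss_action,
 *                                       MLX5_RSS_HASH_IPV4_UDP, hrxq_idx))
 *             return -EINVAL;
 *     MLX5_ASSERT(rss_action.hrxq[2] == hrxq_idx); (IPv4-UDP maps to slot 2.)
 */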
13819
13820 /**
13821  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13822  *
13823  * @param[in] dev
13824  *   Pointer to the Ethernet device structure.
13825  * @param[in] idx
13826  *   Shared RSS action ID holding hash RX queue objects.
13827  * @param[in] hash_fields
13828  *   Defines combination of packet fields to participate in RX hash.
13829  *
13830  * @return
13831  *   Valid hash RX queue index, otherwise 0.
13832  */
13836 static uint32_t
13837 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13838                                  const uint64_t hash_fields)
13839 {
13840         struct mlx5_priv *priv = dev->data->dev_private;
13841         struct mlx5_shared_action_rss *shared_rss =
13842             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13843         const uint32_t *hrxqs = shared_rss->hrxq;
13844
13845         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13846         case MLX5_RSS_HASH_IPV4:
13847                 /* fall-through. */
13848         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13849                 /* fall-through. */
13850         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13851                 return hrxqs[0];
13852         case MLX5_RSS_HASH_IPV4_TCP:
13853                 /* fall-through. */
13854         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13855                 /* fall-through. */
13856         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13857                 return hrxqs[1];
13858         case MLX5_RSS_HASH_IPV4_UDP:
13859                 /* fall-through. */
13860         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13861                 /* fall-through. */
13862         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13863                 return hrxqs[2];
13864         case MLX5_RSS_HASH_IPV6:
13865                 /* fall-through. */
13866         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13867                 /* fall-through. */
13868         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13869                 return hrxqs[3];
13870         case MLX5_RSS_HASH_IPV6_TCP:
13871                 /* fall-through. */
13872         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13873                 /* fall-through. */
13874         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13875                 return hrxqs[4];
13876         case MLX5_RSS_HASH_IPV6_UDP:
13877                 /* fall-through. */
13878         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13879                 /* fall-through. */
13880         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13881                 return hrxqs[5];
13882         case MLX5_RSS_HASH_NONE:
13883                 return hrxqs[6];
13884         default:
13885                 return 0;
13886         }
13888 }
13889
13890 /**
13891  * Apply the flow to the NIC, lock free
13892  * (the mutex should be acquired by the caller).
13893  *
13894  * @param[in] dev
13895  *   Pointer to the Ethernet device structure.
13896  * @param[in, out] flow
13897  *   Pointer to flow structure.
13898  * @param[out] error
13899  *   Pointer to error structure.
13900  *
13901  * @return
13902  *   0 on success, a negative errno value otherwise and rte_errno is set.
13903  */
13904 static int
13905 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13906               struct rte_flow_error *error)
13907 {
13908         struct mlx5_flow_dv_workspace *dv;
13909         struct mlx5_flow_handle *dh;
13910         struct mlx5_flow_handle_dv *dv_h;
13911         struct mlx5_flow *dev_flow;
13912         struct mlx5_priv *priv = dev->data->dev_private;
13913         uint32_t handle_idx;
13914         int n;
13915         int err;
13916         int idx;
13917         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13918         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13919         uint8_t misc_mask;
13920
13921         MLX5_ASSERT(wks);
13922         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13923                 dev_flow = &wks->flows[idx];
13924                 dv = &dev_flow->dv;
13925                 dh = dev_flow->handle;
13926                 dv_h = &dh->dvh;
13927                 n = dv->actions_n;
13928                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13929                         if (dv->transfer) {
13930                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13931                                 dv->actions[n++] = priv->sh->dr_drop_action;
13932                         } else {
13933 #ifdef HAVE_MLX5DV_DR
13934                                 /* DR supports drop action placeholder. */
13935                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13936                                 dv->actions[n++] = dv->group ?
13937                                         priv->sh->dr_drop_action :
13938                                         priv->root_drop_action;
13939 #else
13940                                 /* For DV we use the explicit drop queue. */
13941                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13942                                 dv->actions[n++] =
13943                                                 priv->drop_queue.hrxq->action;
13944 #endif
13945                         }
13946                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13947                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
13948                         struct mlx5_hrxq *hrxq;
13949                         uint32_t hrxq_idx;
13950
13951                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13952                                                     &hrxq_idx);
13953                         if (!hrxq) {
13954                                 rte_flow_error_set
13955                                         (error, rte_errno,
13956                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13957                                          "cannot get hash queue");
13958                                 goto error;
13959                         }
13960                         dh->rix_hrxq = hrxq_idx;
13961                         dv->actions[n++] = hrxq->action;
13962                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13963                         struct mlx5_hrxq *hrxq = NULL;
13964                         uint32_t hrxq_idx;
13965
13966                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13967                                                 rss_desc->shared_rss,
13968                                                 dev_flow->hash_fields);
13969                         if (hrxq_idx)
13970                                 hrxq = mlx5_ipool_get
13971                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13972                                          hrxq_idx);
13973                         if (!hrxq) {
13974                                 rte_flow_error_set
13975                                         (error, rte_errno,
13976                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13977                                          "cannot get hash queue");
13978                                 goto error;
13979                         }
13980                         dh->rix_srss = rss_desc->shared_rss;
13981                         dv->actions[n++] = hrxq->action;
13982                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13983                         if (!priv->sh->default_miss_action) {
13984                                 rte_flow_error_set
13985                                         (error, rte_errno,
13986                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13987                                          "default miss action not created.");
13988                                 goto error;
13989                         }
13990                         dv->actions[n++] = priv->sh->default_miss_action;
13991                 }
13992                 misc_mask = flow_dv_matcher_enable(dv->value.buf);
13993                 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
13994                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13995                                                (void *)&dv->value, n,
13996                                                dv->actions, &dh->drv_flow);
13997                 if (err) {
13998                         rte_flow_error_set
13999                                 (error, errno,
14000                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14001                                 NULL,
14002                                 (!priv->config.allow_duplicate_pattern &&
14003                                 errno == EEXIST) ?
14004                                 "duplicating pattern is not allowed" :
14005                                 "hardware refuses to create flow");
14006                         goto error;
14007                 }
14008                 if (priv->vmwa_context &&
14009                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
14010                         /*
14011                          * The rule contains the VLAN pattern.
14012                          * For VF we are going to create a VLAN
14013                          * interface to make the hypervisor set the
14014                          * correct e-Switch vport context.
14015                          */
14016                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
14017                 }
14018         }
14019         return 0;
14020 error:
14021         err = rte_errno; /* Save rte_errno before cleanup. */
14022         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
14023                        handle_idx, dh, next) {
14024                 /* hrxq is a union; don't clear it if the flag is not set. */
14025                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
14026                         mlx5_hrxq_release(dev, dh->rix_hrxq);
14027                         dh->rix_hrxq = 0;
14028                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
14029                         dh->rix_srss = 0;
14030                 }
14031                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14032                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14033         }
14034         rte_errno = err; /* Restore rte_errno. */
14035         return -rte_errno;
14036 }
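
/*
 * Illustrative sketch (not part of the driver): the error path of
 * flow_dv_apply() saves rte_errno before walking the handles, because the
 * release helpers may overwrite it, and restores it afterwards so the
 * caller sees the original failure. The same idiom in miniature, using
 * plain errno and a hypothetical cleanup helper:
 */
#include <errno.h>

static void
ex_cleanup(void)
{
        errno = EINVAL; /* cleanup may clobber errno */
}

static int
ex_fail_with_cleanup(void)
{
        int err = errno;        /* save the original error code */

        ex_cleanup();           /* may overwrite errno */
        errno = err;            /* restore it for the caller */
        return -errno;
}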
14037
14038 void
14039 flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
14040                           struct mlx5_list_entry *entry)
14041 {
14042         struct mlx5_flow_dv_matcher *resource = container_of(entry,
14043                                                              typeof(*resource),
14044                                                              entry);
14045
14046         claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
14047         mlx5_free(resource);
14048 }
14049
14050 /**
14051  * Release the flow matcher.
14052  *
14053  * @param dev
14054  *   Pointer to Ethernet device.
14055  * @param handle
14056  *   Pointer to mlx5_flow_handle holding the matcher to release.
14057  *
14058  * @return
14059  *   1 while a reference on it exists, 0 when freed.
14060  */
14061 static int
14062 flow_dv_matcher_release(struct rte_eth_dev *dev,
14063                         struct mlx5_flow_handle *handle)
14064 {
14065         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
14066         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
14067                                                             typeof(*tbl), tbl);
14068         int ret;
14069
14070         MLX5_ASSERT(matcher->matcher_object);
14071         ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
14072         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
14073         return ret;
14074 }
14075
14076 void
14077 flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14078 {
14079         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14080         struct mlx5_flow_dv_encap_decap_resource *res =
14081                                        container_of(entry, typeof(*res), entry);
14082
14083         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14084         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
14085 }
14086
14087 /**
14088  * Release an encap/decap resource.
14089  *
14090  * @param dev
14091  *   Pointer to Ethernet device.
14092  * @param encap_decap_idx
14093  *   Index of encap decap resource.
14094  *
14095  * @return
14096  *   1 while a reference on it exists, 0 when freed.
14097  */
14098 static int
14099 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
14100                                      uint32_t encap_decap_idx)
14101 {
14102         struct mlx5_priv *priv = dev->data->dev_private;
14103         struct mlx5_flow_dv_encap_decap_resource *resource;
14104
14105         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
14106                                   encap_decap_idx);
14107         if (!resource)
14108                 return 0;
14109         MLX5_ASSERT(resource->action);
14110         return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
14111 }
14112
14113 /**
14114  * Release a jump to table action resource.
14115  *
14116  * @param dev
14117  *   Pointer to Ethernet device.
14118  * @param rix_jump
14119  *   Index to the jump action resource.
14120  *
14121  * @return
14122  *   1 while a reference on it exists, 0 when freed.
14123  */
14124 static int
14125 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
14126                                   uint32_t rix_jump)
14127 {
14128         struct mlx5_priv *priv = dev->data->dev_private;
14129         struct mlx5_flow_tbl_data_entry *tbl_data;
14130
14131         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
14132                                   rix_jump);
14133         if (!tbl_data)
14134                 return 0;
14135         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
14136 }
14137
14138 void
14139 flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14140 {
14141         struct mlx5_flow_dv_modify_hdr_resource *res =
14142                 container_of(entry, typeof(*res), entry);
14143         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14144
14145         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14146         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
14147 }
14148
14149 /**
14150  * Release a modify-header resource.
14151  *
14152  * @param dev
14153  *   Pointer to Ethernet device.
14154  * @param handle
14155  *   Pointer to mlx5_flow_handle.
14156  *
14157  * @return
14158  *   1 while a reference on it exists, 0 when freed.
14159  */
14160 static int
14161 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
14162                                     struct mlx5_flow_handle *handle)
14163 {
14164         struct mlx5_priv *priv = dev->data->dev_private;
14165         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
14166
14167         MLX5_ASSERT(entry->action);
14168         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
14169 }
14170
14171 void
14172 flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14173 {
14174         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14175         struct mlx5_flow_dv_port_id_action_resource *resource =
14176                                   container_of(entry, typeof(*resource), entry);
14177
14178         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14179         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
14180 }
14181
14182 /**
14183  * Release port ID action resource.
14184  *
14185  * @param dev
14186  *   Pointer to Ethernet device.
14187  * @param port_id
14188  *   Index to the port ID action resource.
14189  *
14190  * @return
14191  *   1 while a reference on it exists, 0 when freed.
14192  */
14193 static int
14194 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
14195                                         uint32_t port_id)
14196 {
14197         struct mlx5_priv *priv = dev->data->dev_private;
14198         struct mlx5_flow_dv_port_id_action_resource *resource;
14199
14200         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
14201         if (!resource)
14202                 return 0;
14203         MLX5_ASSERT(resource->action);
14204         return mlx5_list_unregister(priv->sh->port_id_action_list,
14205                                     &resource->entry);
14206 }
14207
14208 /**
14209  * Release shared RSS action resource.
14210  *
14211  * @param dev
14212  *   Pointer to Ethernet device.
14213  * @param srss
14214  *   Shared RSS action index.
14215  */
14216 static void
14217 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
14218 {
14219         struct mlx5_priv *priv = dev->data->dev_private;
14220         struct mlx5_shared_action_rss *shared_rss;
14221
14222         shared_rss = mlx5_ipool_get
14223                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
14224         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14225 }
14226
14227 void
14228 flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14229 {
14230         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14231         struct mlx5_flow_dv_push_vlan_action_resource *resource =
14232                         container_of(entry, typeof(*resource), entry);
14233
14234         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14235         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
14236 }
14237
14238 /**
14239  * Release push vlan action resource.
14240  *
14241  * @param dev
14242  *   Pointer to Ethernet device.
14243  * @param handle
14244  *   Pointer to mlx5_flow_handle.
14245  *
14246  * @return
14247  *   1 while a reference on it exists, 0 when freed.
14248  */
14249 static int
14250 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
14251                                           struct mlx5_flow_handle *handle)
14252 {
14253         struct mlx5_priv *priv = dev->data->dev_private;
14254         struct mlx5_flow_dv_push_vlan_action_resource *resource;
14255         uint32_t idx = handle->dvh.rix_push_vlan;
14256
14257         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
14258         if (!resource)
14259                 return 0;
14260         MLX5_ASSERT(resource->action);
14261         return mlx5_list_unregister(priv->sh->push_vlan_action_list,
14262                                     &resource->entry);
14263 }
14264
14265 /**
14266  * Release the fate resource.
14267  *
14268  * @param dev
14269  *   Pointer to Ethernet device.
14270  * @param handle
14271  *   Pointer to mlx5_flow_handle.
14272  */
14273 static void
14274 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
14275                                struct mlx5_flow_handle *handle)
14276 {
14277         if (!handle->rix_fate)
14278                 return;
14279         switch (handle->fate_action) {
14280         case MLX5_FLOW_FATE_QUEUE:
14281                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
14282                         mlx5_hrxq_release(dev, handle->rix_hrxq);
14283                 break;
14284         case MLX5_FLOW_FATE_JUMP:
14285                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
14286                 break;
14287         case MLX5_FLOW_FATE_PORT_ID:
14288                 flow_dv_port_id_action_resource_release(dev,
14289                                 handle->rix_port_id_action);
14290                 break;
14291         default:
14292                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
14293                 break;
14294         }
14295         handle->rix_fate = 0;
14296 }
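
/*
 * Illustrative sketch (not part of the driver): rix_hrxq, rix_jump and
 * rix_port_id_action live in one union (rix_fate) discriminated by
 * fate_action, so a release must dispatch on the tag before touching the
 * index. A minimal tagged-union release with hypothetical types:
 */
#include <stdint.h>
#include <stdio.h>

enum ex_fate { EX_FATE_QUEUE, EX_FATE_JUMP, EX_FATE_PORT_ID };

struct ex_handle {
        enum ex_fate fate;
        union {
                uint32_t rix_hrxq;
                uint32_t rix_jump;
                uint32_t rix_port_id;
                uint32_t rix_fate;      /* generic view of the union */
        };
};

static void
ex_fate_release(struct ex_handle *h)
{
        if (!h->rix_fate)
                return;
        switch (h->fate) {
        case EX_FATE_QUEUE:
                printf("release hrxq %u\n", h->rix_hrxq);
                break;
        case EX_FATE_JUMP:
                printf("release jump table %u\n", h->rix_jump);
                break;
        case EX_FATE_PORT_ID:
                printf("release port id action %u\n", h->rix_port_id);
                break;
        }
        h->rix_fate = 0;
}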
14297
14298 void
14299 flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
14300                          struct mlx5_list_entry *entry)
14301 {
14302         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
14303                                                               typeof(*resource),
14304                                                               entry);
14305         struct rte_eth_dev *dev = resource->dev;
14306         struct mlx5_priv *priv = dev->data->dev_private;
14307
14308         if (resource->verbs_action)
14309                 claim_zero(mlx5_flow_os_destroy_flow_action
14310                                                       (resource->verbs_action));
14311         if (resource->normal_path_tbl)
14312                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14313                                              resource->normal_path_tbl);
14314         flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
14315         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
14316         DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
14317 }
14318
14319 /**
14320  * Release a sample resource.
14321  *
14322  * @param dev
14323  *   Pointer to Ethernet device.
14324  * @param handle
14325  *   Pointer to mlx5_flow_handle.
14326  *
14327  * @return
14328  *   1 while a reference on it exists, 0 when freed.
14329  */
14330 static int
14331 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
14332                                      struct mlx5_flow_handle *handle)
14333 {
14334         struct mlx5_priv *priv = dev->data->dev_private;
14335         struct mlx5_flow_dv_sample_resource *resource;
14336
14337         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
14338                                   handle->dvh.rix_sample);
14339         if (!resource)
14340                 return 0;
14341         MLX5_ASSERT(resource->verbs_action);
14342         return mlx5_list_unregister(priv->sh->sample_action_list,
14343                                     &resource->entry);
14344 }
14345
14346 void
14347 flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
14348                              struct mlx5_list_entry *entry)
14349 {
14350         struct mlx5_flow_dv_dest_array_resource *resource =
14351                         container_of(entry, typeof(*resource), entry);
14352         struct rte_eth_dev *dev = resource->dev;
14353         struct mlx5_priv *priv = dev->data->dev_private;
14354         uint32_t i = 0;
14355
14356         MLX5_ASSERT(resource->action);
14357         if (resource->action)
14358                 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14359         for (; i < resource->num_of_dest; i++)
14360                 flow_dv_sample_sub_actions_release(dev,
14361                                                    &resource->sample_idx[i]);
14362         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
14363         DRV_LOG(DEBUG, "destination array resource %p: removed",
14364                 (void *)resource);
14365 }
14366
14367 /**
14368  * Release a destination array resource.
14369  *
14370  * @param dev
14371  *   Pointer to Ethernet device.
14372  * @param handle
14373  *   Pointer to mlx5_flow_handle.
14374  *
14375  * @return
14376  *   1 while a reference on it exists, 0 when freed.
14377  */
14378 static int
14379 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14380                                     struct mlx5_flow_handle *handle)
14381 {
14382         struct mlx5_priv *priv = dev->data->dev_private;
14383         struct mlx5_flow_dv_dest_array_resource *resource;
14384
14385         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14386                                   handle->dvh.rix_dest_array);
14387         if (!resource)
14388                 return 0;
14389         MLX5_ASSERT(resource->action);
14390         return mlx5_list_unregister(priv->sh->dest_array_list,
14391                                     &resource->entry);
14392 }
14393
14394 static void
14395 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
14396 {
14397         struct mlx5_priv *priv = dev->data->dev_private;
14398         struct mlx5_dev_ctx_shared *sh = priv->sh;
14399         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
14400                                 sh->geneve_tlv_option_resource;
14401         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
14402         if (geneve_opt_resource) {
14403                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
14404                                          __ATOMIC_RELAXED))) {
14405                         claim_zero(mlx5_devx_cmd_destroy
14406                                         (geneve_opt_resource->obj));
14407                         mlx5_free(sh->geneve_tlv_option_resource);
14408                         sh->geneve_tlv_option_resource = NULL;
14409                 }
14410         }
14411         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
14412 }
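
/*
 * Illustrative sketch (not part of the driver): the GENEVE TLV option is
 * a per-device singleton, so the release above takes a lock, drops one
 * reference and destroys the object only when the count reaches zero.
 * The same pattern with C11 atomics and a hypothetical mutex:
 */
#include <stdatomic.h>
#include <stdlib.h>
#include <pthread.h>

struct ex_singleton {
        atomic_uint refcnt;
};

static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ex_singleton *ex_res;

static void
ex_singleton_release(void)
{
        pthread_mutex_lock(&ex_lock);
        /* fetch_sub returns the old value: old == 1 means last user. */
        if (ex_res != NULL && atomic_fetch_sub(&ex_res->refcnt, 1) == 1) {
                free(ex_res);
                ex_res = NULL;
        }
        pthread_mutex_unlock(&ex_lock);
}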
14413
14414 /**
14415  * Remove the flow from the NIC but keep it in memory.
14416  * Lock free (the mutex should be acquired by the caller).
14417  *
14418  * @param[in] dev
14419  *   Pointer to Ethernet device.
14420  * @param[in, out] flow
14421  *   Pointer to flow structure.
14422  */
14423 static void
14424 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14425 {
14426         struct mlx5_flow_handle *dh;
14427         uint32_t handle_idx;
14428         struct mlx5_priv *priv = dev->data->dev_private;
14429
14430         if (!flow)
14431                 return;
14432         handle_idx = flow->dev_handles;
14433         while (handle_idx) {
14434                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14435                                     handle_idx);
14436                 if (!dh)
14437                         return;
14438                 if (dh->drv_flow) {
14439                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14440                         dh->drv_flow = NULL;
14441                 }
14442                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14443                         flow_dv_fate_resource_release(dev, dh);
14444                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14445                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14446                 handle_idx = dh->next.next;
14447         }
14448 }
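
/*
 * Illustrative sketch (not part of the driver): flow handles are kept in
 * index pools and chained by integer indices rather than pointers, so
 * teardown walks the chain by repeatedly resolving the next index. A
 * minimal model with a hypothetical pool lookup, where index 0 means
 * "no entry":
 */
#include <stdint.h>
#include <stddef.h>

struct ex_node {
        uint32_t next;  /* 0 terminates the chain */
        void *payload;
};

static struct ex_node *
ex_pool_get(struct ex_node *pool, uint32_t idx)
{
        return idx ? &pool[idx] : NULL;
}

static void
ex_chain_walk(struct ex_node *pool, uint32_t head)
{
        uint32_t idx = head;

        while (idx) {
                struct ex_node *n = ex_pool_get(pool, idx);

                if (n == NULL)
                        return; /* broken chain: stop, like the driver */
                /* ... release n->payload here ... */
                idx = n->next;
        }
}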
14449
14450 /**
14451  * Remove the flow from the NIC and the memory.
14452  * Lock free (the mutex should be acquired by the caller).
14453  *
14454  * @param[in] dev
14455  *   Pointer to the Ethernet device structure.
14456  * @param[in, out] flow
14457  *   Pointer to flow structure.
14458  */
14459 static void
14460 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
14461 {
14462         struct mlx5_flow_handle *dev_handle;
14463         struct mlx5_priv *priv = dev->data->dev_private;
14464         struct mlx5_flow_meter_info *fm = NULL;
14465         uint32_t srss = 0;
14466
14467         if (!flow)
14468                 return;
14469         flow_dv_remove(dev, flow);
14470         if (flow->counter) {
14471                 flow_dv_counter_free(dev, flow->counter);
14472                 flow->counter = 0;
14473         }
14474         if (flow->meter) {
14475                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
14476                 if (fm)
14477                         mlx5_flow_meter_detach(priv, fm);
14478                 flow->meter = 0;
14479         }
14480         /* Keep the current age handling by default. */
14481         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
14482                 flow_dv_aso_ct_release(dev, flow->ct, NULL);
14483         else if (flow->age)
14484                 flow_dv_aso_age_release(dev, flow->age);
14485         if (flow->geneve_tlv_option) {
14486                 flow_dv_geneve_tlv_option_resource_release(dev);
14487                 flow->geneve_tlv_option = 0;
14488         }
14489         while (flow->dev_handles) {
14490                 uint32_t tmp_idx = flow->dev_handles;
14491
14492                 dev_handle = mlx5_ipool_get(priv->sh->ipool
14493                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
14494                 if (!dev_handle)
14495                         return;
14496                 flow->dev_handles = dev_handle->next.next;
14497                 while (dev_handle->flex_item) {
14498                         int index = rte_bsf32(dev_handle->flex_item);
14499
14500                         mlx5_flex_release_index(dev, index);
14501                         dev_handle->flex_item &= ~RTE_BIT32(index);
14502                 }
14503                 if (dev_handle->dvh.matcher)
14504                         flow_dv_matcher_release(dev, dev_handle);
14505                 if (dev_handle->dvh.rix_sample)
14506                         flow_dv_sample_resource_release(dev, dev_handle);
14507                 if (dev_handle->dvh.rix_dest_array)
14508                         flow_dv_dest_array_resource_release(dev, dev_handle);
14509                 if (dev_handle->dvh.rix_encap_decap)
14510                         flow_dv_encap_decap_resource_release(dev,
14511                                 dev_handle->dvh.rix_encap_decap);
14512                 if (dev_handle->dvh.modify_hdr)
14513                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
14514                 if (dev_handle->dvh.rix_push_vlan)
14515                         flow_dv_push_vlan_action_resource_release(dev,
14516                                                                   dev_handle);
14517                 if (dev_handle->dvh.rix_tag)
14518                         flow_dv_tag_release(dev,
14519                                             dev_handle->dvh.rix_tag);
14520                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
14521                         flow_dv_fate_resource_release(dev, dev_handle);
14522                 else if (!srss)
14523                         srss = dev_handle->rix_srss;
14524                 if (fm && dev_handle->is_meter_flow_id &&
14525                     dev_handle->split_flow_id)
14526                         mlx5_ipool_free(fm->flow_ipool,
14527                                         dev_handle->split_flow_id);
14528                 else if (dev_handle->split_flow_id &&
14529                     !dev_handle->is_meter_flow_id)
14530                         mlx5_ipool_free(priv->sh->ipool
14531                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
14532                                         dev_handle->split_flow_id);
14533                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14534                            tmp_idx);
14535         }
14536         if (srss)
14537                 flow_dv_shared_rss_action_release(dev, srss);
14538 }
14539
14540 /**
14541  * Release array of hash RX queue objects.
14542  * Helper function.
14543  *
14544  * @param[in] dev
14545  *   Pointer to the Ethernet device structure.
14546  * @param[in, out] hrxqs
14547  *   Array of hash RX queue objects.
14548  *
14549  * @return
14550  *   Total number of references to hash RX queue objects in *hrxqs* array
14551  *   after this operation.
14552  */
14553 static int
14554 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14555                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14556 {
14557         size_t i;
14558         int remaining = 0;
14559
14560         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14561                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14562
14563                 if (!ret)
14564                         (*hrxqs)[i] = 0;
14565                 remaining += ret;
14566         }
14567         return remaining;
14568 }
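
/*
 * Illustrative sketch (not part of the driver): the helper above sums the
 * per-object references that remain after each release, so a zero result
 * proves the whole array is really gone. In miniature, with a
 * hypothetical release callback returning the new reference count:
 */
static int
ex_release_all(int (*release_one)(unsigned int), unsigned int *ids, int n)
{
        int remaining = 0;
        int i;

        for (i = 0; i < n; i++) {
                int ret = release_one(ids[i]);

                if (ret == 0)
                        ids[i] = 0;     /* fully freed, forget the id */
                remaining += ret;
        }
        return remaining;       /* 0 iff nothing holds a reference */
}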
14569
14570 /**
14571  * Release all hash RX queue objects representing shared RSS action.
14572  *
14573  * @param[in] dev
14574  *   Pointer to the Ethernet device structure.
14575  * @param[in, out] action
14576  *   Shared RSS action to remove hash RX queue objects from.
14577  *
14578  * @return
14579  *   Total number of references to hash RX queue objects stored in *action*
14580  *   after this operation.
14581  *   Expected to be 0 if no external references are held.
14582  */
14583 static int
14584 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
14585                                  struct mlx5_shared_action_rss *shared_rss)
14586 {
14587         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
14588 }
14589
14590 /**
14591  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
14592  * user input.
14593  *
14594  * Only one hash value is available for one L3+L4 combination.
14595  * For example, MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
14596  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
14597  * the same slot in mlx5_rss_hash_fields.
14599  *
14600  * @param[in] rss
14601  *   Pointer to the shared action RSS conf.
14602  * @param[in, out] hash_field
14603  *   hash_field variable needed to be adjusted.
14604  *
14605  * @return
14606  *   void
14607  */
14608 static void
14609 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
14610                                      uint64_t *hash_field)
14611 {
14612         uint64_t rss_types = rss->origin.types;
14613
14614         switch (*hash_field & ~IBV_RX_HASH_INNER) {
14615         case MLX5_RSS_HASH_IPV4:
14616                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
14617                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
14618                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14619                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
14620                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14621                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
14622                         else
14623                                 *hash_field |= MLX5_RSS_HASH_IPV4;
14624                 }
14625                 return;
14626         case MLX5_RSS_HASH_IPV6:
14627                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
14628                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
14629                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14630                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
14631                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14632                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
14633                         else
14634                                 *hash_field |= MLX5_RSS_HASH_IPV6;
14635                 }
14636                 return;
14637         case MLX5_RSS_HASH_IPV4_UDP:
14638                 /* fall-through. */
14639         case MLX5_RSS_HASH_IPV6_UDP:
14640                 if (rss_types & RTE_ETH_RSS_UDP) {
14641                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
14642                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14643                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
14644                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14645                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
14646                         else
14647                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14648                 }
14649                 return;
14650         case MLX5_RSS_HASH_IPV4_TCP:
14651                 /* fall-through. */
14652         case MLX5_RSS_HASH_IPV6_TCP:
14653                 if (rss_types & RTE_ETH_RSS_TCP) {
14654                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14655                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14656                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14657                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14658                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14659                         else
14660                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14661                 }
14662                 return;
14663         default:
14664                 return;
14665         }
14666 }
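
/*
 * Illustrative sketch (not part of the driver): the adjust helper above
 * narrows a default both-sides hash to a single side when the user asked
 * for L3_SRC_ONLY/L3_DST_ONLY. The same narrowing for IPv4 only, with
 * hypothetical bit values in place of the IBV_RX_HASH_* / RTE_ETH_RSS_*
 * flags:
 */
#include <stdint.h>

#define EX_HASH_SRC_IPV4   UINT64_C(0x1)  /* hypothetical */
#define EX_HASH_DST_IPV4   UINT64_C(0x2)  /* hypothetical */
#define EX_RSS_L3_SRC_ONLY UINT64_C(0x10) /* hypothetical */
#define EX_RSS_L3_DST_ONLY UINT64_C(0x20) /* hypothetical */

static uint64_t
ex_adjust_ipv4_hash(uint64_t rss_types)
{
        uint64_t hf = EX_HASH_SRC_IPV4 | EX_HASH_DST_IPV4; /* default: both */

        if (rss_types & EX_RSS_L3_DST_ONLY)
                hf = EX_HASH_DST_IPV4;
        else if (rss_types & EX_RSS_L3_SRC_ONLY)
                hf = EX_HASH_SRC_IPV4;
        return hf;
}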
14667
14668 /**
14669  * Setup shared RSS action.
14670  * Prepare a set of hash RX queue objects sufficient to handle all valid
14671  * hash_fields combinations (see enum ibv_rx_hash_fields).
14672  *
14673  * @param[in] dev
14674  *   Pointer to the Ethernet device structure.
14675  * @param[in] action_idx
14676  *   Shared RSS action ipool index.
14677  * @param[in, out] action
14678  *   Partially initialized shared RSS action.
14679  * @param[out] error
14680  *   Perform verbose error reporting if not NULL. Initialized in case of
14681  *   error only.
14682  *
14683  * @return
14684  *   0 on success, otherwise negative errno value.
14685  */
14686 static int
14687 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14688                            uint32_t action_idx,
14689                            struct mlx5_shared_action_rss *shared_rss,
14690                            struct rte_flow_error *error)
14691 {
14692         struct mlx5_flow_rss_desc rss_desc = { 0 };
14693         size_t i;
14694         int err;
14695
14696         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl,
14697                                      !!dev->data->dev_started)) {
14698                 return rte_flow_error_set(error, rte_errno,
14699                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14700                                           "cannot setup indirection table");
14701         }
14702         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14703         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14704         rss_desc.const_q = shared_rss->origin.queue;
14705         rss_desc.queue_num = shared_rss->origin.queue_num;
14706         /* Set non-zero value to indicate a shared RSS. */
14707         rss_desc.shared_rss = action_idx;
14708         rss_desc.ind_tbl = shared_rss->ind_tbl;
14709         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14710                 uint32_t hrxq_idx;
14711                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14712                 int tunnel = 0;
14713
14714                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14715                 if (shared_rss->origin.level > 1) {
14716                         hash_fields |= IBV_RX_HASH_INNER;
14717                         tunnel = 1;
14718                 }
14719                 rss_desc.tunnel = tunnel;
14720                 rss_desc.hash_fields = hash_fields;
14721                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14722                 if (!hrxq_idx) {
14723                         rte_flow_error_set
14724                                 (error, rte_errno,
14725                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14726                                  "cannot get hash queue");
14727                         goto error_hrxq_new;
14728                 }
14729                 err = __flow_dv_action_rss_hrxq_set
14730                         (shared_rss, hash_fields, hrxq_idx);
14731                 MLX5_ASSERT(!err);
14732         }
14733         return 0;
14734 error_hrxq_new:
14735         err = rte_errno;
14736         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14737         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true))
14738                 shared_rss->ind_tbl = NULL;
14739         rte_errno = err;
14740         return -rte_errno;
14741 }
14742
14743 /**
14744  * Create shared RSS action.
14745  *
14746  * @param[in] dev
14747  *   Pointer to the Ethernet device structure.
14748  * @param[in] conf
14749  *   Shared action configuration.
14750  * @param[in] rss
14751  *   RSS action specification used to create shared action.
14752  * @param[out] error
14753  *   Perform verbose error reporting if not NULL. Initialized in case of
14754  *   error only.
14755  *
14756  * @return
14757  *   A valid shared action ID in case of success, 0 otherwise and
14758  *   rte_errno is set.
14759  */
14760 static uint32_t
14761 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14762                             const struct rte_flow_indir_action_conf *conf,
14763                             const struct rte_flow_action_rss *rss,
14764                             struct rte_flow_error *error)
14765 {
14766         struct mlx5_priv *priv = dev->data->dev_private;
14767         struct mlx5_shared_action_rss *shared_rss = NULL;
14768         void *queue = NULL;
14769         struct rte_flow_action_rss *origin;
14770         const uint8_t *rss_key;
14771         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14772         uint32_t idx;
14773
14774         RTE_SET_USED(conf);
14775         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14776                             0, SOCKET_ID_ANY);
14777         shared_rss = mlx5_ipool_zmalloc
14778                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14779         if (!shared_rss || !queue) {
14780                 rte_flow_error_set(error, ENOMEM,
14781                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14782                                    "cannot allocate resource memory");
14783                 goto error_rss_init;
14784         }
14785         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14786                 rte_flow_error_set(error, E2BIG,
14787                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14788                                    "rss action number out of range");
14789                 goto error_rss_init;
14790         }
14791         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14792                                           sizeof(*shared_rss->ind_tbl),
14793                                           0, SOCKET_ID_ANY);
14794         if (!shared_rss->ind_tbl) {
14795                 rte_flow_error_set(error, ENOMEM,
14796                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14797                                    "cannot allocate resource memory");
14798                 goto error_rss_init;
14799         }
14800         memcpy(queue, rss->queue, queue_size);
14801         shared_rss->ind_tbl->queues = queue;
14802         shared_rss->ind_tbl->queues_n = rss->queue_num;
14803         origin = &shared_rss->origin;
14804         origin->func = rss->func;
14805         origin->level = rss->level;
14806         /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
14807         origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
14808         /* NULL RSS key indicates default RSS key. */
14809         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14810         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14811         origin->key = &shared_rss->key[0];
14812         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14813         origin->queue = queue;
14814         origin->queue_num = rss->queue_num;
14815         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14816                 goto error_rss_init;
14817         rte_spinlock_init(&shared_rss->action_rss_sl);
14818         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14819         rte_spinlock_lock(&priv->shared_act_sl);
14820         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14821                      &priv->rss_shared_actions, idx, shared_rss, next);
14822         rte_spinlock_unlock(&priv->shared_act_sl);
14823         return idx;
14824 error_rss_init:
14825         if (shared_rss) {
14826                 if (shared_rss->ind_tbl)
14827                         mlx5_free(shared_rss->ind_tbl);
14828                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14829                                 idx);
14830         }
14831         if (queue)
14832                 mlx5_free(queue);
14833         return 0;
14834 }
14835
14836 /**
14837  * Destroy the shared RSS action.
14838  * Release related hash RX queue objects.
14839  *
14840  * @param[in] dev
14841  *   Pointer to the Ethernet device structure.
14842  * @param[in] idx
14843  *   The shared RSS action object ID to be removed.
14844  * @param[out] error
14845  *   Perform verbose error reporting if not NULL. Initialized in case of
14846  *   error only.
14847  *
14848  * @return
14849  *   0 on success, otherwise negative errno value.
14850  */
14851 static int
14852 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14853                              struct rte_flow_error *error)
14854 {
14855         struct mlx5_priv *priv = dev->data->dev_private;
14856         struct mlx5_shared_action_rss *shared_rss =
14857             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14858         uint32_t old_refcnt = 1;
14859         int remaining;
14860         uint16_t *queue = NULL;
14861
14862         if (!shared_rss)
14863                 return rte_flow_error_set(error, EINVAL,
14864                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14865                                           "invalid shared action");
14866         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14867                                          0, 0, __ATOMIC_ACQUIRE,
14868                                          __ATOMIC_RELAXED))
14869                 return rte_flow_error_set(error, EBUSY,
14870                                           RTE_FLOW_ERROR_TYPE_ACTION,
14871                                           NULL,
14872                                           "shared rss has references");
14873         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14874         if (remaining)
14875                 return rte_flow_error_set(error, EBUSY,
14876                                           RTE_FLOW_ERROR_TYPE_ACTION,
14877                                           NULL,
14878                                           "shared rss hrxq has references");
14879         queue = shared_rss->ind_tbl->queues;
14880         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true,
14881                                                !!dev->data->dev_started);
14882         if (remaining)
14883                 return rte_flow_error_set(error, EBUSY,
14884                                           RTE_FLOW_ERROR_TYPE_ACTION,
14885                                           NULL,
14886                                           "shared rss indirection table has"
14887                                           " references");
14888         mlx5_free(queue);
14889         rte_spinlock_lock(&priv->shared_act_sl);
14890         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14891                      &priv->rss_shared_actions, idx, shared_rss, next);
14892         rte_spinlock_unlock(&priv->shared_act_sl);
14893         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14894                         idx);
14895         return 0;
14896 }
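
/*
 * Illustrative sketch (not part of the driver): the release above only
 * proceeds when the reference count is exactly one, atomically swinging
 * it to zero so concurrent users fail with EBUSY instead of racing the
 * teardown. The same guard with C11 atomics:
 */
#include <stdatomic.h>
#include <errno.h>

static int
ex_try_destroy(atomic_uint *refcnt)
{
        unsigned int expected = 1;

        /* Succeeds only if no one else holds a reference. */
        if (!atomic_compare_exchange_strong(refcnt, &expected, 0))
                return -EBUSY;
        /* ... now safe to free the object ... */
        return 0;
}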
14897
14898 /**
14899  * Create indirect action, lock free
14900  * (the mutex should be acquired by the caller).
14901  * Dispatcher for action type specific call.
14902  *
14903  * @param[in] dev
14904  *   Pointer to the Ethernet device structure.
14905  * @param[in] conf
14906  *   Shared action configuration.
14907  * @param[in] action
14908  *   Action specification used to create indirect action.
14909  * @param[out] error
14910  *   Perform verbose error reporting if not NULL. Initialized in case of
14911  *   error only.
14912  *
14913  * @return
14914  *   A valid shared action handle in case of success, NULL otherwise and
14915  *   rte_errno is set.
14916  */
14917 static struct rte_flow_action_handle *
14918 flow_dv_action_create(struct rte_eth_dev *dev,
14919                       const struct rte_flow_indir_action_conf *conf,
14920                       const struct rte_flow_action *action,
14921                       struct rte_flow_error *err)
14922 {
14923         struct mlx5_priv *priv = dev->data->dev_private;
14924         uint32_t age_idx = 0;
14925         uint32_t idx = 0;
14926         uint32_t ret = 0;
14927
14928         switch (action->type) {
14929         case RTE_FLOW_ACTION_TYPE_RSS:
14930                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14931                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14932                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14933                 break;
14934         case RTE_FLOW_ACTION_TYPE_AGE:
14935                 age_idx = flow_dv_aso_age_alloc(dev, err);
14936                 if (!age_idx) {
14937                         ret = -rte_errno;
14938                         break;
14939                 }
14940                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14941                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14942                 flow_dv_aso_age_params_init(dev, age_idx,
14943                                         ((const struct rte_flow_action_age *)
14944                                                 action->conf)->context ?
14945                                         ((const struct rte_flow_action_age *)
14946                                                 action->conf)->context :
14947                                         (void *)(uintptr_t)idx,
14948                                         ((const struct rte_flow_action_age *)
14949                                                 action->conf)->timeout);
14950                 ret = age_idx;
14951                 break;
14952         case RTE_FLOW_ACTION_TYPE_COUNT:
14953                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14954                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14955                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14956                 break;
14957         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14958                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14959                                                          err);
14960                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14961                 break;
14962         default:
14963                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14964                                    NULL, "action type not supported");
14965                 break;
14966         }
14967         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14968 }
14969
14970 /**
14971  * Destroy the indirect action.
14972  * Release action related resources on the NIC and the memory.
14973  * Lock free (the mutex should be acquired by the caller).
14974  * Dispatcher for action type specific call.
14975  *
14976  * @param[in] dev
14977  *   Pointer to the Ethernet device structure.
14978  * @param[in] handle
14979  *   The indirect action object handle to be removed.
14980  * @param[out] error
14981  *   Perform verbose error reporting if not NULL. Initialized in case of
14982  *   error only.
14983  *
14984  * @return
14985  *   0 on success, otherwise negative errno value.
14986  */
14987 static int
14988 flow_dv_action_destroy(struct rte_eth_dev *dev,
14989                        struct rte_flow_action_handle *handle,
14990                        struct rte_flow_error *error)
14991 {
14992         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14993         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14994         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14995         struct mlx5_flow_counter *cnt;
14996         uint32_t no_flow_refcnt = 1;
14997         int ret;
14998
14999         switch (type) {
15000         case MLX5_INDIRECT_ACTION_TYPE_RSS:
15001                 return __flow_dv_action_rss_release(dev, idx, error);
15002         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15003                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
15004                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
15005                                                  &no_flow_refcnt, 1, false,
15006                                                  __ATOMIC_ACQUIRE,
15007                                                  __ATOMIC_RELAXED))
15008                         return rte_flow_error_set(error, EBUSY,
15009                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15010                                                   NULL,
15011                                                   "Indirect count action has references");
15012                 flow_dv_counter_free(dev, idx);
15013                 return 0;
15014         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15015                 ret = flow_dv_aso_age_release(dev, idx);
15016                 if (ret)
15017                         /*
15018                          * In this case, the last flow holding a reference
15019                          * will actually release the age action.
15020                          */
15021                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
15022                                 " released with references %d.", idx, ret);
15023                 return 0;
15024         case MLX5_INDIRECT_ACTION_TYPE_CT:
15025                 ret = flow_dv_aso_ct_release(dev, idx, error);
15026                 if (ret < 0)
15027                         return ret;
15028                 if (ret > 0)
15029                         DRV_LOG(DEBUG, "Connection tracking object %u still "
15030                                 "has references %d.", idx, ret);
15031                 return 0;
15032         default:
15033                 return rte_flow_error_set(error, ENOTSUP,
15034                                           RTE_FLOW_ERROR_TYPE_ACTION,
15035                                           NULL,
15036                                           "action type not supported");
15037         }
15038 }
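
/*
 * Illustrative sketch (not part of the driver): an indirect action handle
 * is a 32-bit integer cast to a pointer, with the action type in the high
 * bits and the object index below, as unpacked at the top of
 * flow_dv_action_destroy(). Encode/decode with a hypothetical bit
 * position standing in for MLX5_INDIRECT_ACTION_TYPE_OFFSET:
 */
#include <stdint.h>

#define EX_TYPE_OFFSET 29 /* hypothetical bit position */

static inline uint32_t
ex_handle_encode(uint32_t type, uint32_t idx)
{
        return (type << EX_TYPE_OFFSET) | idx;
}

static inline uint32_t
ex_handle_type(uint32_t handle)
{
        return handle >> EX_TYPE_OFFSET;
}

static inline uint32_t
ex_handle_idx(uint32_t handle)
{
        return handle & ((UINT32_C(1) << EX_TYPE_OFFSET) - 1);
}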
15039
15040 /**
15041  * Update the shared RSS action configuration in place.
15042  *
15043  * @param[in] dev
15044  *   Pointer to the Ethernet device structure.
15045  * @param[in] idx
15046  *   The shared RSS action object ID to be updated.
15047  * @param[in] action_conf
15048  *   RSS action specification used to modify *shared_rss*.
15049  * @param[out] error
15050  *   Perform verbose error reporting if not NULL. Initialized in case of
15051  *   error only.
15052  *
15053  * @return
15054  *   0 on success, otherwise negative errno value.
15055  * @note: currently only update of the RSS queues is supported.
15056  */
15057 static int
15058 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
15059                             const struct rte_flow_action_rss *action_conf,
15060                             struct rte_flow_error *error)
15061 {
15062         struct mlx5_priv *priv = dev->data->dev_private;
15063         struct mlx5_shared_action_rss *shared_rss =
15064             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
15065         int ret = 0;
15066         void *queue = NULL;
15067         uint16_t *queue_old = NULL;
15068         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
15069         bool dev_started = !!dev->data->dev_started;
15070
15071         if (!shared_rss)
15072                 return rte_flow_error_set(error, EINVAL,
15073                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15074                                           "invalid shared action to update");
15075         if (priv->obj_ops.ind_table_modify == NULL)
15076                 return rte_flow_error_set(error, ENOTSUP,
15077                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15078                                           "cannot modify indirection table");
15079         queue = mlx5_malloc(MLX5_MEM_ZERO,
15080                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
15081                             0, SOCKET_ID_ANY);
15082         if (!queue)
15083                 return rte_flow_error_set(error, ENOMEM,
15084                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15085                                           NULL,
15086                                           "cannot allocate resource memory");
15087         memcpy(queue, action_conf->queue, queue_size);
15088         MLX5_ASSERT(shared_rss->ind_tbl);
15089         rte_spinlock_lock(&shared_rss->action_rss_sl);
15090         queue_old = shared_rss->ind_tbl->queues;
15091         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
15092                                         queue, action_conf->queue_num,
15093                                         true /* standalone */,
15094                                         dev_started /* ref_new_qs */,
15095                                         dev_started /* deref_old_qs */);
15096         if (ret) {
15097                 mlx5_free(queue);
15098                 ret = rte_flow_error_set(error, rte_errno,
15099                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15100                                           "cannot update indirection table");
15101         } else {
15102                 mlx5_free(queue_old);
15103                 shared_rss->origin.queue = queue;
15104                 shared_rss->origin.queue_num = action_conf->queue_num;
15105         }
15106         rte_spinlock_unlock(&shared_rss->action_rss_sl);
15107         return ret;
15108 }
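
/*
 * Illustration only, not part of the driver: a hedged sketch of how an
 * application reaches __flow_dv_action_rss_update() through the public
 * API. <port_id> and <handle> (a shared RSS action handle) are assumed
 * to exist; the queue IDs are hypothetical:
 *
 *     struct rte_flow_error err;
 *     static uint16_t new_queues[] = { 0, 1, 2, 3 };
 *     struct rte_flow_action_rss rss_conf = {
 *             .queue = new_queues,
 *             .queue_num = RTE_DIM(new_queues),
 *     };
 *     struct rte_flow_action update = {
 *             .type = RTE_FLOW_ACTION_TYPE_RSS,
 *             .conf = &rss_conf,
 *     };
 *
 *     // Only the queue set is honored here, per the note above.
 *     int ret = rte_flow_action_handle_update(port_id, handle, &update,
 *                                             &err);
 */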
15109
15110 /**
15111  * Update a conntrack object's context or direction in place.
15112  * Context updates must be synchronized by the caller.
15113  *
15114  * @param[in] dev
15115  *   Pointer to the Ethernet device structure.
15116  * @param[in] idx
15117  *   The conntrack object ID to be updated.
15118  * @param[in] update
15119  *   Pointer to the structure of information to update.
15120  * @param[out] error
15121  *   Perform verbose error reporting if not NULL. Initialized in case of
15122  *   error only.
15123  *
15124  * @return
15125  *   0 on success, otherwise negative errno value.
15126  */
15127 static int
15128 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
15129                            const struct rte_flow_modify_conntrack *update,
15130                            struct rte_flow_error *error)
15131 {
15132         struct mlx5_priv *priv = dev->data->dev_private;
15133         struct mlx5_aso_ct_action *ct;
15134         const struct rte_flow_action_conntrack *new_prf;
15135         int ret = 0;
15136         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15137         uint32_t dev_idx;
15138
15139         if (PORT_ID(priv) != owner)
15140                 return rte_flow_error_set(error, EACCES,
15141                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15142                                           NULL,
15143                                           "CT object owned by another port");
15144         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15145         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15146         if (!ct->refcnt)
15147                 return rte_flow_error_set(error, ENOMEM,
15148                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15149                                           NULL,
15150                                           "CT object is inactive");
15151         new_prf = &update->new_ct;
15152         if (update->direction)
15153                 ct->is_original = !!new_prf->is_original_dir;
15154         if (update->state) {
15155                 /* Only validate the profile when it needs to be updated. */
15156                 ret = mlx5_validate_action_ct(dev, new_prf, error);
15157                 if (ret)
15158                         return ret;
15159                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
15160                 if (ret)
15161                         return rte_flow_error_set(error, EIO,
15162                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15163                                         NULL,
15164                                         "Failed to send CT context update WQE");
15165                 /* Block until ready or a failure. */
15166                 ret = mlx5_aso_ct_available(priv->sh, ct);
15167                 if (ret)
15168                         rte_flow_error_set(error, rte_errno,
15169                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15170                                            NULL,
15171                                            "Timed out waiting for CT update");
15172         }
15173         return ret;
15174 }
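
/*
 * Illustration only, not part of the driver: a hedged sketch of a CT
 * update from the application side, assuming <ct_handle> was created on
 * this port with an RTE_FLOW_ACTION_TYPE_CONNTRACK indirect action:
 *
 *     struct rte_flow_error err;
 *     struct rte_flow_modify_conntrack mod = {
 *             .new_ct = { .is_original_dir = 1 },
 *             .direction = 1, // flip the tracked direction only
 *             .state = 0,     // do not touch the CT context
 *     };
 *
 *     // Routed to __flow_dv_action_ct_update(); a state update would
 *     // also post an ASO WQE and block until it completes, as above.
 *     int ret = rte_flow_action_handle_update(port_id, ct_handle, &mod,
 *                                             &err);
 */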
15175
15176 /**
15177  * Update an indirect (shared) action configuration in place, lock free.
15178  * The caller must hold the mutex.
15179  *
15180  * @param[in] dev
15181  *   Pointer to the Ethernet device structure.
15182  * @param[in] handle
15183  *   The indirect action object handle to be updated.
15184  * @param[in] update
15185  *   Action specification used to modify the action pointed to by *handle*.
15186  *   *update* may be of the same type as the action pointed to by the
15187  *   *handle* argument, or some other structure such as a wrapper,
15188  *   depending on the indirect action type.
15189  * @param[out] err
15190  *   Perform verbose error reporting if not NULL. Initialized in case of
15191  *   error only.
15192  *
15193  * @return
15194  *   0 on success, otherwise negative errno value.
15195  */
15196 static int
15197 flow_dv_action_update(struct rte_eth_dev *dev,
15198                         struct rte_flow_action_handle *handle,
15199                         const void *update,
15200                         struct rte_flow_error *err)
15201 {
15202         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15203         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15204         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15205         const void *action_conf;
15206
15207         switch (type) {
15208         case MLX5_INDIRECT_ACTION_TYPE_RSS:
15209                 action_conf = ((const struct rte_flow_action *)update)->conf;
15210                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
15211         case MLX5_INDIRECT_ACTION_TYPE_CT:
15212                 return __flow_dv_action_ct_update(dev, idx, update, err);
15213         default:
15214                 return rte_flow_error_set(err, ENOTSUP,
15215                                           RTE_FLOW_ERROR_TYPE_ACTION,
15216                                           NULL,
15217                                           "action type update not supported");
15218         }
15219 }
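
/*
 * Illustration only: the indirect action handle is a plain 32-bit value
 * with the action type packed into the bits above
 * MLX5_INDIRECT_ACTION_TYPE_OFFSET, mirroring the decoding above. A
 * handle for, e.g., a shared RSS action with pool index <idx> would be
 * composed roughly as:
 *
 *     uint32_t act_idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
 *                         MLX5_INDIRECT_ACTION_TYPE_OFFSET) | idx;
 *     struct rte_flow_action_handle *handle =
 *             (struct rte_flow_action_handle *)(uintptr_t)act_idx;
 */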
15220
15221 /**
15222  * Destroy the meter sub policy table rules.
15223  * Lock free; the caller must hold the mutex.
15224  *
15225  * @param[in] dev
15226  *   Pointer to Ethernet device.
15227  * @param[in] sub_policy
15228  *   Pointer to meter sub policy table.
15229  */
15230 static void
15231 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
15232                              struct mlx5_flow_meter_sub_policy *sub_policy)
15233 {
15234         struct mlx5_priv *priv = dev->data->dev_private;
15235         struct mlx5_flow_tbl_data_entry *tbl;
15236         struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
15237         struct mlx5_flow_meter_info *next_fm;
15238         struct mlx5_sub_policy_color_rule *color_rule;
15239         void *tmp;
15240         uint32_t i;
15241
15242         for (i = 0; i < RTE_COLORS; i++) {
15243                 next_fm = NULL;
15244                 if (i == RTE_COLOR_GREEN && policy &&
15245                     policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
15246                         next_fm = mlx5_flow_meter_find(priv,
15247                                         policy->act_cnt[i].next_mtr_id, NULL);
15248                 RTE_TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
15249                                    next_port, tmp) {
15250                         claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
15251                         tbl = container_of(color_rule->matcher->tbl,
15252                                            typeof(*tbl), tbl);
15253                         mlx5_list_unregister(tbl->matchers,
15254                                              &color_rule->matcher->entry);
15255                         TAILQ_REMOVE(&sub_policy->color_rules[i],
15256                                      color_rule, next_port);
15257                         mlx5_free(color_rule);
15258                         if (next_fm)
15259                                 mlx5_flow_meter_detach(priv, next_fm);
15260                 }
15261         }
15262         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15263                 if (sub_policy->rix_hrxq[i]) {
15264                         if (policy && !policy->is_hierarchy)
15265                                 mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
15266                         sub_policy->rix_hrxq[i] = 0;
15267                 }
15268                 if (sub_policy->jump_tbl[i]) {
15269                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15270                                                      sub_policy->jump_tbl[i]);
15271                         sub_policy->jump_tbl[i] = NULL;
15272                 }
15273         }
15274         if (sub_policy->tbl_rsc) {
15275                 flow_dv_tbl_resource_release(MLX5_SH(dev),
15276                                              sub_policy->tbl_rsc);
15277                 sub_policy->tbl_rsc = NULL;
15278         }
15279 }
15280
15281 /**
15282  * Destroy the policy rules of every configured domain, lock free.
15283  * The caller must hold the mutex.
15284  * Iterates over all sub-policies and destroys their rules.
15285  *
15286  * @param[in] dev
15287  *   Pointer to the Ethernet device structure.
15288  * @param[in] mtr_policy
15289  *   Meter policy struct.
15290  */
15291 static void
15292 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
15293                              struct mlx5_flow_meter_policy *mtr_policy)
15294 {
15295         uint32_t i, j;
15296         struct mlx5_flow_meter_sub_policy *sub_policy;
15297         uint16_t sub_policy_num;
15298
15299         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15300                 sub_policy_num = (mtr_policy->sub_policy_num >>
15301                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15302                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15303                 for (j = 0; j < sub_policy_num; j++) {
15304                         sub_policy = mtr_policy->sub_policys[i][j];
15305                         if (sub_policy)
15306                                 __flow_dv_destroy_sub_policy_rules(dev,
15307                                                                    sub_policy);
15308                 }
15309         }
15310 }
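
/*
 * Illustration only: sub_policy_num packs one small per-domain counter
 * into a single word, MLX5_MTR_SUB_POLICY_NUM_SHIFT bits per domain.
 * The count for a given domain is recovered as in the loop above:
 *
 *     uint16_t n = (mtr_policy->sub_policy_num >>
 *                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
 *                  MLX5_MTR_SUB_POLICY_NUM_MASK;
 *
 * so the ingress, egress and transfer counts occupy consecutive bit
 * fields of that width, lowest bits first.
 */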
15311
15312 /**
15313  * Destroy the policy actions per color, lock free.
15314  * The caller must hold the mutex.
15315  * Releases resources according to each color's fate action type.
15316  *
15317  * @param[in] dev
15318  *   Pointer to the Ethernet device structure.
15319  * @param[in] mtr_policy
15320  *   Meter policy struct.
15321  */
15322 static void
15323 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
15324                       struct mlx5_flow_meter_policy *mtr_policy)
15325 {
15326         struct rte_flow_action *rss_action;
15327         struct mlx5_flow_handle dev_handle;
15328         uint32_t i, j;
15329
15330         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15331                 if (mtr_policy->act_cnt[i].rix_mark) {
15332                         flow_dv_tag_release(dev,
15333                                 mtr_policy->act_cnt[i].rix_mark);
15334                         mtr_policy->act_cnt[i].rix_mark = 0;
15335                 }
15336                 if (mtr_policy->act_cnt[i].modify_hdr) {
15337                         dev_handle.dvh.modify_hdr =
15338                                 mtr_policy->act_cnt[i].modify_hdr;
15339                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
15340                 }
15341                 switch (mtr_policy->act_cnt[i].fate_action) {
15342                 case MLX5_FLOW_FATE_SHARED_RSS:
15343                         rss_action = mtr_policy->act_cnt[i].rss;
15344                         mlx5_free(rss_action);
15345                         break;
15346                 case MLX5_FLOW_FATE_PORT_ID:
15347                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
15348                                 flow_dv_port_id_action_resource_release(dev,
15349                                 mtr_policy->act_cnt[i].rix_port_id_action);
15350                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
15351                         }
15352                         break;
15353                 case MLX5_FLOW_FATE_DROP:
15354                 case MLX5_FLOW_FATE_JUMP:
15355                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15356                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
15357                                                 NULL;
15358                         break;
15359                 default:
15360                         /* Queue action needs no cleanup. */
15361                         break;
15362                 }
15363         }
15364         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15365                 mtr_policy->dr_drop_action[j] = NULL;
15366 }
15367
15368 /**
15369  * Create the policy actions per domain, lock free.
15370  * The caller must hold the mutex.
15371  * Dispatches to a type-specific handler for each per-color action.
15372  *
15373  * @param[in] dev
15374  *   Pointer to the Ethernet device structure.
15375  * @param[in] mtr_policy
15376  *   Meter policy struct.
15377  * @param[in] actions
15378  *   Per-color array of action lists used to create the meter actions.
15379  * @param[out] error
15380  *   Perform verbose error reporting if not NULL. Initialized in case of
15381  *   error only.
15382  *
15383  * @return
15384  *   0 on success, otherwise negative errno value.
15385  */
15386 static int
15387 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15388                         struct mlx5_flow_meter_policy *mtr_policy,
15389                         const struct rte_flow_action *actions[RTE_COLORS],
15390                         enum mlx5_meter_domain domain,
15391                         struct rte_mtr_error *error)
15392 {
15393         struct mlx5_priv *priv = dev->data->dev_private;
15394         struct rte_flow_error flow_err;
15395         const struct rte_flow_action *act;
15396         uint64_t action_flags;
15397         struct mlx5_flow_handle dh;
15398         struct mlx5_flow dev_flow;
15399         struct mlx5_flow_dv_port_id_action_resource port_id_action;
15400         int i, ret;
15401         uint8_t egress, transfer;
15402         struct mlx5_meter_policy_action_container *act_cnt = NULL;
15403         union {
15404                 struct mlx5_flow_dv_modify_hdr_resource res;
15405                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15406                             sizeof(struct mlx5_modification_cmd) *
15407                             (MLX5_MAX_MODIFY_NUM + 1)];
15408         } mhdr_dummy;
15409         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15410         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
15411
15412         MLX5_ASSERT(wks);
15413         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15414         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15415         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15416         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15417         memset(&port_id_action, 0,
15418                sizeof(struct mlx5_flow_dv_port_id_action_resource));
15419         memset(mhdr_res, 0, sizeof(*mhdr_res));
15420         mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15421                                        (egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15422                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX);
15423         dev_flow.handle = &dh;
15424         dev_flow.dv.port_id_action = &port_id_action;
15425         dev_flow.external = true;
15426         for (i = 0; i < RTE_COLORS; i++) {
15427                 if (i < MLX5_MTR_RTE_COLORS)
15428                         act_cnt = &mtr_policy->act_cnt[i];
15429                 /* Skip creating actions for colors marked to be skipped. */
15430                 if ((i == RTE_COLOR_YELLOW && mtr_policy->skip_y) ||
15431                     (i == RTE_COLOR_GREEN && mtr_policy->skip_g))
15432                         continue;
15433                 action_flags = 0;
15434                 for (act = actions[i];
15435                      act && act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
15436                         switch (act->type) {
15437                         case RTE_FLOW_ACTION_TYPE_MARK:
15438                         {
15439                                 uint32_t tag_be = mlx5_flow_mark_set
15440                                         (((const struct rte_flow_action_mark *)
15441                                         (act->conf))->id);
15442
15443                                 if (i >= MLX5_MTR_RTE_COLORS)
15444                                         return -rte_mtr_error_set(error,
15445                                           ENOTSUP,
15446                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15447                                           NULL,
15448                                           "cannot create policy "
15449                                           "mark action for this color");
15450                                 wks->mark = 1;
15451                                 if (flow_dv_tag_resource_register(dev, tag_be,
15452                                                   &dev_flow, &flow_err))
15453                                         return -rte_mtr_error_set(error,
15454                                         ENOTSUP,
15455                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15456                                         NULL,
15457                                         "cannot setup policy mark action");
15458                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
15459                                 act_cnt->rix_mark =
15460                                         dev_flow.handle->dvh.rix_tag;
15461                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15462                                 break;
15463                         }
15464                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15465                                 if (i >= MLX5_MTR_RTE_COLORS)
15466                                         return -rte_mtr_error_set(error,
15467                                           ENOTSUP,
15468                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15469                                           NULL,
15470                                           "cannot create policy "
15471                                           "set tag action for this color");
15472                                 if (flow_dv_convert_action_set_tag
15473                                 (dev, mhdr_res,
15474                                 (const struct rte_flow_action_set_tag *)
15475                                 act->conf,  &flow_err))
15476                                         return -rte_mtr_error_set(error,
15477                                         ENOTSUP,
15478                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15479                                         NULL, "cannot convert policy "
15480                                         "set tag action");
15481                                 if (!mhdr_res->actions_num)
15482                                         return -rte_mtr_error_set(error,
15483                                         ENOTSUP,
15484                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15485                                         NULL, "cannot find policy "
15486                                         "set tag action");
15487                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15488                                 break;
15489                         case RTE_FLOW_ACTION_TYPE_DROP:
15490                         {
15491                                 struct mlx5_flow_mtr_mng *mtrmng =
15492                                                 priv->sh->mtrmng;
15493                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15494
15495                                 /*
15496                                  * Create the drop table with
15497                                  * METER DROP level.
15498                                  */
15499                                 if (!mtrmng->drop_tbl[domain]) {
15500                                         mtrmng->drop_tbl[domain] =
15501                                         flow_dv_tbl_resource_get(dev,
15502                                         MLX5_FLOW_TABLE_LEVEL_METER,
15503                                         egress, transfer, false, NULL, 0,
15504                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15505                                         if (!mtrmng->drop_tbl[domain])
15506                                                 return -rte_mtr_error_set
15507                                         (error, ENOTSUP,
15508                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15509                                         NULL,
15510                                         "Failed to create meter drop table");
15511                                 }
15512                                 tbl_data = container_of
15513                                 (mtrmng->drop_tbl[domain],
15514                                 struct mlx5_flow_tbl_data_entry, tbl);
15515                                 if (i < MLX5_MTR_RTE_COLORS) {
15516                                         act_cnt->dr_jump_action[domain] =
15517                                                 tbl_data->jump.action;
15518                                         act_cnt->fate_action =
15519                                                 MLX5_FLOW_FATE_DROP;
15520                                 }
15521                                 if (i == RTE_COLOR_RED)
15522                                         mtr_policy->dr_drop_action[domain] =
15523                                                 tbl_data->jump.action;
15524                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15525                                 break;
15526                         }
15527                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15528                         {
15529                                 if (i >= MLX5_MTR_RTE_COLORS)
15530                                         return -rte_mtr_error_set(error,
15531                                         ENOTSUP,
15532                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15533                                         NULL, "cannot create policy "
15534                                         "fate queue for this color");
15535                                 act_cnt->queue =
15536                                 ((const struct rte_flow_action_queue *)
15537                                         (act->conf))->index;
15538                                 act_cnt->fate_action =
15539                                         MLX5_FLOW_FATE_QUEUE;
15540                                 dev_flow.handle->fate_action =
15541                                         MLX5_FLOW_FATE_QUEUE;
15542                                 mtr_policy->is_queue = 1;
15543                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15544                                 break;
15545                         }
15546                         case RTE_FLOW_ACTION_TYPE_RSS:
15547                         {
15548                                 int rss_size;
15549
15550                                 if (i >= MLX5_MTR_RTE_COLORS)
15551                                         return -rte_mtr_error_set(error,
15552                                           ENOTSUP,
15553                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15554                                           NULL,
15555                                           "cannot create policy "
15556                                           "rss action for this color");
15557                                 /*
15558                                  * Save RSS conf into policy struct
15559                                  * for translate stage.
15560                                  */
15561                                 rss_size = (int)rte_flow_conv
15562                                         (RTE_FLOW_CONV_OP_ACTION,
15563                                         NULL, 0, act, &flow_err);
15564                                 if (rss_size <= 0)
15565                                         return -rte_mtr_error_set(error,
15566                                           ENOTSUP,
15567                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15568                                           NULL, "wrong RSS action "
15569                                           "struct size");
15570                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
15571                                                 rss_size, 0, SOCKET_ID_ANY);
15572                                 if (!act_cnt->rss)
15573                                         return -rte_mtr_error_set(error,
15574                                           ENOTSUP,
15575                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15576                                           NULL,
15577                                           "failed to allocate RSS action memory");
15578                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
15579                                         act_cnt->rss, rss_size,
15580                                         act, &flow_err);
15581                                 if (ret < 0)
15582                                         return -rte_mtr_error_set(error,
15583                                           ENOTSUP,
15584                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15585                                           NULL, "failed to save "
15586                                           "RSS action into policy struct");
15587                                 act_cnt->fate_action =
15588                                         MLX5_FLOW_FATE_SHARED_RSS;
15589                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15590                                 break;
15591                         }
15592                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15593                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
15594                         {
15595                                 struct mlx5_flow_dv_port_id_action_resource
15596                                         port_id_resource;
15597                                 uint32_t port_id = 0;
15598
15599                                 if (i >= MLX5_MTR_RTE_COLORS)
15600                                         return -rte_mtr_error_set(error,
15601                                         ENOTSUP,
15602                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15603                                         NULL, "cannot create policy "
15604                                         "port action for this color");
15605                                 memset(&port_id_resource, 0,
15606                                         sizeof(port_id_resource));
15607                                 if (flow_dv_translate_action_port_id(dev, act,
15608                                                 &port_id, &flow_err))
15609                                         return -rte_mtr_error_set(error,
15610                                         ENOTSUP,
15611                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15612                                         NULL, "cannot translate "
15613                                         "policy port action");
15614                                 port_id_resource.port_id = port_id;
15615                                 if (flow_dv_port_id_action_resource_register
15616                                         (dev, &port_id_resource,
15617                                         &dev_flow, &flow_err))
15618                                         return -rte_mtr_error_set(error,
15619                                         ENOTSUP,
15620                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15621                                         NULL, "cannot setup "
15622                                         "policy port action");
15623                                 act_cnt->rix_port_id_action =
15624                                         dev_flow.handle->rix_port_id_action;
15625                                 act_cnt->fate_action =
15626                                         MLX5_FLOW_FATE_PORT_ID;
15627                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15628                                 break;
15629                         }
15630                         case RTE_FLOW_ACTION_TYPE_JUMP:
15631                         {
15632                                 uint32_t jump_group = 0;
15633                                 uint32_t table = 0;
15634                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15635                                 struct flow_grp_info grp_info = {
15636                                         .external = !!dev_flow.external,
15637                                         .transfer = !!transfer,
15638                                         .fdb_def_rule = !!priv->fdb_def_rule,
15639                                         .std_tbl_fix = 0,
15640                                         .skip_scale = dev_flow.skip_scale &
15641                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
15642                                 };
15643                                 struct mlx5_flow_meter_sub_policy *sub_policy =
15644                                         mtr_policy->sub_policys[domain][0];
15645
15646                                 if (i >= MLX5_MTR_RTE_COLORS)
15647                                         return -rte_mtr_error_set(error,
15648                                           ENOTSUP,
15649                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15650                                           NULL,
15651                                           "cannot create policy "
15652                                           "jump action for this color");
15653                                 jump_group =
15654                                 ((const struct rte_flow_action_jump *)
15655                                                         act->conf)->group;
15656                                 if (mlx5_flow_group_to_table(dev, NULL,
15657                                                        jump_group,
15658                                                        &table,
15659                                                        &grp_info, &flow_err))
15660                                         return -rte_mtr_error_set(error,
15661                                         ENOTSUP,
15662                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15663                                         NULL, "cannot setup "
15664                                         "policy jump action");
15665                                 sub_policy->jump_tbl[i] =
15666                                 flow_dv_tbl_resource_get(dev,
15667                                         table, egress,
15668                                         transfer,
15669                                         !!dev_flow.external,
15670                                         NULL, jump_group, 0,
15671                                         0, &flow_err);
15672                                 if (!sub_policy->jump_tbl[i])
15673                                         return -rte_mtr_error_set(error,
15674                                         ENOTSUP,
15675                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15676                                         NULL,
15677                                         "cannot create jump action.");
15678                                 tbl_data = container_of
15679                                 (sub_policy->jump_tbl[i],
15680                                 struct mlx5_flow_tbl_data_entry, tbl);
15681                                 act_cnt->dr_jump_action[domain] =
15682                                         tbl_data->jump.action;
15683                                 act_cnt->fate_action =
15684                                         MLX5_FLOW_FATE_JUMP;
15685                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15686                                 break;
15687                         }
15688                         /*
15689                          * No need to check meter hierarchy for Y or R colors
15690                          * here since it is done in the validation stage.
15691                          */
15692                         case RTE_FLOW_ACTION_TYPE_METER:
15693                         {
15694                                 const struct rte_flow_action_meter *mtr;
15695                                 struct mlx5_flow_meter_info *next_fm;
15696                                 struct mlx5_flow_meter_policy *next_policy;
15697                                 struct rte_flow_action tag_action;
15698                                 struct mlx5_rte_flow_action_set_tag set_tag;
15699                                 uint32_t next_mtr_idx = 0;
15700
15701                                 mtr = act->conf;
15702                                 next_fm = mlx5_flow_meter_find(priv,
15703                                                         mtr->mtr_id,
15704                                                         &next_mtr_idx);
15705                                 if (!next_fm)
15706                                         return -rte_mtr_error_set(error, EINVAL,
15707                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15708                                                 "Failed to find the next meter.");
15709                                 if (next_fm->def_policy)
15710                                         return -rte_mtr_error_set(error, EINVAL,
15711                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15712                                 "Hierarchy supports only termination meters.");
15713                                 next_policy = mlx5_flow_meter_policy_find(dev,
15714                                                 next_fm->policy_id, NULL);
15715                                 MLX5_ASSERT(next_policy);
15716                                 if (next_fm->drop_cnt) {
15717                                         set_tag.id =
15718                                                 (enum modify_reg)
15719                                                 mlx5_flow_get_reg_id(dev,
15720                                                 MLX5_MTR_ID,
15721                                                 0,
15722                                                 (struct rte_flow_error *)error);
15723                                         set_tag.offset = (priv->mtr_reg_share ?
15724                                                 MLX5_MTR_COLOR_BITS : 0);
15725                                         set_tag.length = (priv->mtr_reg_share ?
15726                                                MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
15727                                                MLX5_REG_BITS);
15728                                         set_tag.data = next_mtr_idx;
15729                                         tag_action.type =
15730                                                 (enum rte_flow_action_type)
15731                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
15732                                         tag_action.conf = &set_tag;
15733                                         if (flow_dv_convert_action_set_reg
15734                                                 (mhdr_res, &tag_action,
15735                                                 (struct rte_flow_error *)error))
15736                                                 return -rte_errno;
15737                                         action_flags |=
15738                                                 MLX5_FLOW_ACTION_SET_TAG;
15739                                 }
15740                                 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
15741                                 act_cnt->next_mtr_id = next_fm->meter_id;
15742                                 act_cnt->next_sub_policy = NULL;
15743                                 mtr_policy->is_hierarchy = 1;
15744                                 mtr_policy->dev = next_policy->dev;
15745                                 action_flags |=
15746                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
15747                                 break;
15748                         }
15749                         default:
15750                                 return -rte_mtr_error_set(error, ENOTSUP,
15751                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15752                                           NULL, "action type not supported");
15753                         }
15754                         if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
15755                                 /* Create the modify header action if needed. */
15756                                 dev_flow.dv.group = 1;
15757                                 if (flow_dv_modify_hdr_resource_register
15758                                         (dev, mhdr_res, &dev_flow, &flow_err))
15759                                         return -rte_mtr_error_set(error,
15760                                                 ENOTSUP,
15761                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15762                                                 NULL, "cannot register policy "
15763                                                 "set tag action");
15764                                 act_cnt->modify_hdr =
15765                                         dev_flow.handle->dvh.modify_hdr;
15766                         }
15767                 }
15768         }
15769         return 0;
15770 }
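
/*
 * Illustration only, not part of the driver: the per-color action lists
 * consumed above originate from the public meter API. A hedged sketch of
 * a policy this function would translate (green -> queue 0, red -> drop),
 * with <port_id> and <policy_id> chosen by the application:
 *
 *     struct rte_mtr_error mtr_err;
 *     struct rte_flow_action_queue q = { .index = 0 };
 *     struct rte_flow_action green[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_action red[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_mtr_meter_policy_params params = {
 *             .actions = {
 *                     [RTE_COLOR_GREEN] = green,
 *                     [RTE_COLOR_RED] = red,
 *             },
 *     };
 *
 *     int ret = rte_mtr_meter_policy_add(port_id, policy_id, &params,
 *                                        &mtr_err);
 */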
15771
15772 /**
15773  * Create the policy actions for every configured domain, lock free.
15774  * The caller must hold the mutex.
15775  * Dispatches to the per-domain creation routine.
15776  *
15777  * @param[in] dev
15778  *   Pointer to the Ethernet device structure.
15779  * @param[in] mtr_policy
15780  *   Meter policy struct.
15781  * @param[in] actions
15782  *   Per-color array of action lists used to create the meter actions.
15783  * @param[out] error
15784  *   Perform verbose error reporting if not NULL. Initialized in case of
15785  *   error only.
15786  *
15787  * @return
15788  *   0 on success, otherwise negative errno value.
15789  */
15790 static int
15791 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15792                       struct mlx5_flow_meter_policy *mtr_policy,
15793                       const struct rte_flow_action *actions[RTE_COLORS],
15794                       struct rte_mtr_error *error)
15795 {
15796         int ret, i;
15797         uint16_t sub_policy_num;
15798
15799         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15800                 sub_policy_num = (mtr_policy->sub_policy_num >>
15801                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15802                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15803                 if (sub_policy_num) {
15804                         ret = __flow_dv_create_domain_policy_acts(dev,
15805                                 mtr_policy, actions,
15806                                 (enum mlx5_meter_domain)i, error);
15807                         /* Resource cleanup is done at the caller's level. */
15808                         if (ret)
15809                                 return ret;
15810                 }
15811         }
15812         return 0;
15813 }
15814
15815 /**
15816  * Query a DV flow rule for its statistics via DevX.
15817  *
15818  * @param[in] dev
15819  *   Pointer to Ethernet device.
15820  * @param[in] cnt_idx
15821  *   Index to the flow counter.
15822  * @param[out] data
15823  *   Data retrieved by the query.
15824  * @param[out] error
15825  *   Perform verbose error reporting if not NULL.
15826  *
15827  * @return
15828  *   0 on success, a negative errno value otherwise and rte_errno is set.
15829  */
15830 int
15831 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15832                     struct rte_flow_error *error)
15833 {
15834         struct mlx5_priv *priv = dev->data->dev_private;
15835         struct rte_flow_query_count *qc = data;
15836
15837         if (!priv->sh->devx)
15838                 return rte_flow_error_set(error, ENOTSUP,
15839                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15840                                           NULL,
15841                                           "counters are not supported");
15842         if (cnt_idx) {
15843                 uint64_t pkts, bytes;
15844                 struct mlx5_flow_counter *cnt;
15845                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15846
15847                 if (err)
15848                         return rte_flow_error_set(error, -err,
15849                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15850                                         NULL, "cannot read counters");
15851                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15852                 qc->hits_set = 1;
15853                 qc->bytes_set = 1;
15854                 qc->hits = pkts - cnt->hits;
15855                 qc->bytes = bytes - cnt->bytes;
15856                 if (qc->reset) {
15857                         cnt->hits = pkts;
15858                         cnt->bytes = bytes;
15859                 }
15860                 return 0;
15861         }
15862         return rte_flow_error_set(error, EINVAL,
15863                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15864                                   NULL,
15865                                   "counters are not available");
15866 }
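
/*
 * Illustration only, not part of the driver: a minimal application-side
 * sketch of the query path above, assuming <flow> was created on
 * <port_id> with an RTE_FLOW_ACTION_TYPE_COUNT action:
 *
 *     struct rte_flow_error err;
 *     struct rte_flow_query_count qc = { .reset = 1 }; // clear after read
 *     struct rte_flow_action count = {
 *             .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *     };
 *
 *     if (rte_flow_query(port_id, flow, &count, &qc, &err) == 0 &&
 *         qc.hits_set && qc.bytes_set)
 *             printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                    qc.hits, qc.bytes);
 *
 * The returned hits/bytes are deltas since rule creation or the last
 * reset: the driver keeps a baseline in the counter and subtracts it,
 * as coded above.
 */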
15867
15868
15869 /**
15870  * Query counter's action pointer for a DV flow rule via DevX.
15871  *
15872  * @param[in] dev
15873  *   Pointer to Ethernet device.
15874  * @param[in] cnt_idx
15875  *   Index to the flow counter.
15876  * @param[out] action_ptr
15877  *   Action pointer for counter.
15878  * @param[out] error
15879  *   Perform verbose error reporting if not NULL.
15880  *
15881  * @return
15882  *   0 on success, a negative errno value otherwise and rte_errno is set.
15883  */
15884 int
15885 flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,
15886         void **action_ptr, struct rte_flow_error *error)
15887 {
15888         struct mlx5_priv *priv = dev->data->dev_private;
15889
15890         if (!priv->sh->devx || !action_ptr)
15891                 return rte_flow_error_set(error, ENOTSUP,
15892                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15893                                           NULL,
15894                                           "counters are not supported");
15895
15896         if (cnt_idx) {
15897                 struct mlx5_flow_counter *cnt =
15898                         flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15899                 if (cnt) {
15900                         *action_ptr = cnt->action;
15901                         return 0;
15902                 }
15903         }
15904         return rte_flow_error_set(error, EINVAL,
15905                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15906                                   NULL,
15907                                   "counters are not available");
15908 }
15909
15910 static int
15911 flow_dv_action_query(struct rte_eth_dev *dev,
15912                      const struct rte_flow_action_handle *handle, void *data,
15913                      struct rte_flow_error *error)
15914 {
15915         struct mlx5_age_param *age_param;
15916         struct rte_flow_query_age *resp;
15917         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15918         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15919         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15920         struct mlx5_priv *priv = dev->data->dev_private;
15921         struct mlx5_aso_ct_action *ct;
15922         uint16_t owner;
15923         uint32_t dev_idx;
15924
15925         switch (type) {
15926         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15927                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
15928                 resp = data;
15929                 resp->aged = __atomic_load_n(&age_param->state,
15930                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
15931                                                                           1 : 0;
15932                 resp->sec_since_last_hit_valid = !resp->aged;
15933                 if (resp->sec_since_last_hit_valid)
15934                         resp->sec_since_last_hit = __atomic_load_n
15935                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15936                 return 0;
15937         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15938                 return flow_dv_query_count(dev, idx, data, error);
15939         case MLX5_INDIRECT_ACTION_TYPE_CT:
15940                 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15941                 if (owner != PORT_ID(priv))
15942                         return rte_flow_error_set(error, EACCES,
15943                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15944                                         NULL,
15945                                         "CT object owned by another port");
15946                 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15947                 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15948                 MLX5_ASSERT(ct);
15949                 if (!ct->refcnt)
15950                         return rte_flow_error_set(error, EFAULT,
15951                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15952                                         NULL,
15953                                         "CT object is inactive");
15954                 ((struct rte_flow_action_conntrack *)data)->peer_port =
15955                                                         ct->peer;
15956                 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
15957                                                         ct->is_original;
15958                 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
15959                         return rte_flow_error_set(error, EIO,
15960                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15961                                         NULL,
15962                                         "Failed to query CT context");
15963                 return 0;
15964         default:
15965                 return rte_flow_error_set(error, ENOTSUP,
15966                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15967                                           "action type query not supported");
15968         }
15969 }
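
/*
 * Illustration only, not part of the driver: indirect action queries are
 * dispatched to flow_dv_action_query() through the public API, e.g. for
 * an AGE handle created earlier with rte_flow_action_handle_create():
 *
 *     struct rte_flow_error err;
 *     struct rte_flow_query_age age;
 *
 *     memset(&age, 0, sizeof(age));
 *     if (rte_flow_action_handle_query(port_id, age_handle, &age,
 *                                      &err) == 0 &&
 *         age.sec_since_last_hit_valid)
 *             printf("last hit %u seconds ago\n",
 *                    (unsigned int)age.sec_since_last_hit);
 */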
15970
15971 /**
15972  * Query a flow rule AGE action for aging information.
15973  *
15974  * @param[in] dev
15975  *   Pointer to Ethernet device.
15976  * @param[in] flow
15977  *   Pointer to the sub flow.
15978  * @param[out] data
15979  *   Data retrieved by the query.
15980  * @param[out] error
15981  *   Perform verbose error reporting if not NULL.
15982  *
15983  * @return
15984  *   0 on success, a negative errno value otherwise and rte_errno is set.
15985  */
15986 static int
15987 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15988                   void *data, struct rte_flow_error *error)
15989 {
15990         struct rte_flow_query_age *resp = data;
15991         struct mlx5_age_param *age_param;
15992
15993         if (flow->age) {
15994                 struct mlx5_aso_age_action *act =
15995                                      flow_aso_age_get_by_idx(dev, flow->age);
15996
15997                 age_param = &act->age_params;
15998         } else if (flow->counter) {
15999                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
16000
16001                 if (!age_param || !age_param->timeout)
16002                         return rte_flow_error_set
16003                                         (error, EINVAL,
16004                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16005                                          NULL, "cannot read age data");
16006         } else {
16007                 return rte_flow_error_set(error, EINVAL,
16008                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16009                                           NULL, "age data not available");
16010         }
16011         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
16012                                      AGE_TMOUT ? 1 : 0;
16013         resp->sec_since_last_hit_valid = !resp->aged;
16014         if (resp->sec_since_last_hit_valid)
16015                 resp->sec_since_last_hit = __atomic_load_n
16016                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
16017         return 0;
16018 }
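
/*
 * Illustration only, not part of the driver: the same aging information
 * is reachable per flow rule through rte_flow_query() with an AGE
 * action, assuming <flow> was created with one:
 *
 *     struct rte_flow_error err;
 *     struct rte_flow_query_age resp;
 *     struct rte_flow_action age = { .type = RTE_FLOW_ACTION_TYPE_AGE };
 *
 *     int ret = rte_flow_query(port_id, flow, &age, &resp, &err);
 *
 * Depending on how the rule was created, the driver reads either the
 * ASO age parameters (flow->age) or the counter-based ones
 * (flow->counter), as in the branches above.
 */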
16019
16020 /**
16021  * Query a flow.
16022  *
16023  * @see rte_flow_query()
16024  * @see rte_flow_ops
16025  */
16026 static int
16027 flow_dv_query(struct rte_eth_dev *dev,
16028               struct rte_flow *flow,
16029               const struct rte_flow_action *actions,
16030               void *data,
16031               struct rte_flow_error *error)
16032 {
16033         int ret = -EINVAL;
16034
16035         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
16036                 switch (actions->type) {
16037                 case RTE_FLOW_ACTION_TYPE_VOID:
16038                         break;
16039                 case RTE_FLOW_ACTION_TYPE_COUNT:
16040                         ret = flow_dv_query_count(dev, flow->counter, data,
16041                                                   error);
16042                         break;
16043                 case RTE_FLOW_ACTION_TYPE_AGE:
16044                         ret = flow_dv_query_age(dev, flow, data, error);
16045                         break;
16046                 default:
16047                         return rte_flow_error_set(error, ENOTSUP,
16048                                                   RTE_FLOW_ERROR_TYPE_ACTION,
16049                                                   actions,
16050                                                   "action not supported");
16051                 }
16052         }
16053         return ret;
16054 }
16055
16056 /**
16057  * Destroy the meter table set.
16058  * Lock free; the caller must hold the mutex.
16059  *
16060  * @param[in] dev
16061  *   Pointer to Ethernet device.
16062  * @param[in] fm
16063  *   Meter information table.
16064  */
16065 static void
16066 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
16067                         struct mlx5_flow_meter_info *fm)
16068 {
16069         struct mlx5_priv *priv = dev->data->dev_private;
16070         int i;
16071
16072         if (!fm || !priv->config.dv_flow_en)
16073                 return;
16074         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16075                 if (fm->drop_rule[i]) {
16076                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
16077                         fm->drop_rule[i] = NULL;
16078                 }
16079         }
16080 }
16081
16082 static void
16083 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
16084 {
16085         struct mlx5_priv *priv = dev->data->dev_private;
16086         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16087         struct mlx5_flow_tbl_data_entry *tbl;
16088         int i, j;
16089
16090         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16091                 if (mtrmng->def_rule[i]) {
16092                         claim_zero(mlx5_flow_os_destroy_flow
16093                                         (mtrmng->def_rule[i]));
16094                         mtrmng->def_rule[i] = NULL;
16095                 }
16096                 if (mtrmng->def_matcher[i]) {
16097                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
16098                                 struct mlx5_flow_tbl_data_entry, tbl);
16099                         mlx5_list_unregister(tbl->matchers,
16100                                              &mtrmng->def_matcher[i]->entry);
16101                         mtrmng->def_matcher[i] = NULL;
16102                 }
16103                 for (j = 0; j < MLX5_REG_BITS; j++) {
16104                         if (mtrmng->drop_matcher[i][j]) {
16105                                 tbl =
16106                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
16107                                              struct mlx5_flow_tbl_data_entry,
16108                                              tbl);
16109                                 mlx5_list_unregister(tbl->matchers,
16110                                             &mtrmng->drop_matcher[i][j]->entry);
16111                                 mtrmng->drop_matcher[i][j] = NULL;
16112                         }
16113                 }
16114                 if (mtrmng->drop_tbl[i]) {
16115                         flow_dv_tbl_resource_release(MLX5_SH(dev),
16116                                 mtrmng->drop_tbl[i]);
16117                         mtrmng->drop_tbl[i] = NULL;
16118                 }
16119         }
16120 }
16121
16122 /* Number of meter flow actions: count and jump, or count and drop. */
16123 #define METER_ACTIONS 2
16124
16125 static void
16126 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
16127                                     enum mlx5_meter_domain domain)
16128 {
16129         struct mlx5_priv *priv = dev->data->dev_private;
16130         struct mlx5_flow_meter_def_policy *def_policy =
16131                         priv->sh->mtrmng->def_policy[domain];
16132
16133         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
16134         mlx5_free(def_policy);
16135         priv->sh->mtrmng->def_policy[domain] = NULL;
16136 }
16137
16138 /**
16139  * Destroy the default policy table set.
16140  *
16141  * @param[in] dev
16142  *   Pointer to Ethernet device.
16143  */
16144 static void
16145 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
16146 {
16147         struct mlx5_priv *priv = dev->data->dev_private;
16148         int i;
16149
16150         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
16151                 if (priv->sh->mtrmng->def_policy[i])
16152                         __flow_dv_destroy_domain_def_policy(dev,
16153                                         (enum mlx5_meter_domain)i);
16154         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
16155 }
16156
16157 static int
16158 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
16159                         uint32_t color_reg_c_idx,
16160                         enum rte_color color, void *matcher_object,
16161                         int actions_n, void *actions,
16162                         bool match_src_port, const struct rte_flow_item *item,
16163                         void **rule, const struct rte_flow_attr *attr)
16164 {
16165         int ret;
16166         struct mlx5_flow_dv_match_params value = {
16167                 .size = sizeof(value.buf),
16168         };
16169         struct mlx5_flow_dv_match_params matcher = {
16170                 .size = sizeof(matcher.buf),
16171         };
16172         struct mlx5_priv *priv = dev->data->dev_private;
16173         uint8_t misc_mask;
16174
16175         if (match_src_port && (priv->representor || priv->master)) {
16176                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
16177                                                    value.buf, item, attr)) {
16178                         DRV_LOG(ERR, "Failed to create meter policy %d"
16179                                 " flow match value with port.", color);
16180                         return -1;
16181                 }
16182         }
16183         flow_dv_match_meta_reg(matcher.buf, value.buf,
16184                                (enum modify_reg)color_reg_c_idx,
16185                                rte_col_2_mlx5_col(color), UINT32_MAX);
16186         misc_mask = flow_dv_matcher_enable(value.buf);
16187         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16188         ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value,
16189                                        actions_n, actions, rule);
16190         if (ret) {
16191                 DRV_LOG(ERR, "Failed to create meter policy%d flow.", color);
16192                 return -1;
16193         }
16194         return 0;
16195 }
16196
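/**
 * Register a meter policy matcher on the policy table.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] color_reg_c_idx
 *   Index of the REG_C register holding the meter color.
 * @param[in] priority
 *   Matcher priority.
 * @param[in] sub_policy
 *   Pointer to the sub policy owning the table.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] match_src_port
 *   Match the source port (representor) or not.
 * @param[in] item
 *   The source port match item, if any.
 * @param[out] policy_matcher
 *   Pointer to the registered matcher.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, -1 otherwise.
 */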
16197 static int
16198 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
16199                         uint32_t color_reg_c_idx,
16200                         uint16_t priority,
16201                         struct mlx5_flow_meter_sub_policy *sub_policy,
16202                         const struct rte_flow_attr *attr,
16203                         bool match_src_port,
16204                         const struct rte_flow_item *item,
16205                         struct mlx5_flow_dv_matcher **policy_matcher,
16206                         struct rte_flow_error *error)
16207 {
16208         struct mlx5_list_entry *entry;
16209         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
16210         struct mlx5_flow_dv_matcher matcher = {
16211                 .mask = {
16212                         .size = sizeof(matcher.mask.buf),
16213                 },
16214                 .tbl = tbl_rsc,
16215         };
16216         struct mlx5_flow_dv_match_params value = {
16217                 .size = sizeof(value.buf),
16218         };
16219         struct mlx5_flow_cb_ctx ctx = {
16220                 .error = error,
16221                 .data = &matcher,
16222         };
16223         struct mlx5_flow_tbl_data_entry *tbl_data;
16224         struct mlx5_priv *priv = dev->data->dev_private;
16225         const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
16226
16227         if (match_src_port && (priv->representor || priv->master)) {
16228                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
16229                                                    value.buf, item, attr)) {
16230                         DRV_LOG(ERR, "Failed to register meter policy%d matcher"
16231                                 " with port.", priority);
16232                         return -1;
16233                 }
16234         }
16235         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
16236         if (priority < RTE_COLOR_RED)
16237                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16238                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
16239         matcher.priority = priority;
16240         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
16241                                     matcher.mask.size);
16242         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16243         if (!entry) {
16244                 DRV_LOG(ERR, "Failed to register meter policy matcher.");
16245                 return -1;
16246         }
16247         *policy_matcher =
16248                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
16249         return 0;
16250 }
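/*
 * Illustrative sketch of the color match performed above: the meter
 * color lives in the low MLX5_MTR_COLOR_BITS bits of the selected
 * REG_C register, so the matcher masks those bits and each per-color
 * rule supplies the matching value:
 *
 *    mask  = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
 *    value = rte_col_2_mlx5_col(color);
 *    flow_dv_match_meta_reg(matcher_buf, value_buf,
 *                           (enum modify_reg)color_reg_c_idx,
 *                           value, mask);
 */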
16251
16252 /**
16253  * Create the policy rules per domain.
16254  *
16255  * @param[in] dev
16256  *   Pointer to Ethernet device.
16257  * @param[in] sub_policy
16258  *   Pointer to sub policy table.
16259  * @param[in] egress
16260  *   Direction of the table.
16261  * @param[in] transfer
16262  *   E-Switch or NIC flow.
 * @param[in] match_src_port
 *   Match the source port (representor port) or not.
16263  * @param[in] acts
16264  *   Pointer to policy action list per color.
16265  *
16266  * @return
16267  *   0 on success, -1 otherwise.
16268  */
16269 static int
16270 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
16271                 struct mlx5_flow_meter_sub_policy *sub_policy,
16272                 uint8_t egress, uint8_t transfer, bool match_src_port,
16273                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
16274 {
16275         struct mlx5_priv *priv = dev->data->dev_private;
16276         struct rte_flow_error flow_err;
16277         uint32_t color_reg_c_idx;
16278         struct rte_flow_attr attr = {
16279                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16280                 .priority = 0,
16281                 .ingress = 0,
16282                 .egress = !!egress,
16283                 .transfer = !!transfer,
16284                 .reserved = 0,
16285         };
16286         int i;
16287         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
16288         struct mlx5_sub_policy_color_rule *color_rule;
16289         bool svport_match;
16290         struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL};
16291
16292         if (ret < 0)
16293                 return -1;
16294         /* Create policy table with POLICY level. */
16295         if (!sub_policy->tbl_rsc)
16296                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
16297                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
16298                                 egress, transfer, false, NULL, 0, 0,
16299                                 sub_policy->idx, &flow_err);
16300         if (!sub_policy->tbl_rsc) {
16301                 DRV_LOG(ERR,
16302                         "Failed to create meter sub policy table.");
16303                 return -1;
16304         }
16305         /* Prepare matchers. */
16306         color_reg_c_idx = ret;
16307         for (i = 0; i < RTE_COLORS; i++) {
16308                 TAILQ_INIT(&sub_policy->color_rules[i]);
16309                 if (!acts[i].actions_n)
16310                         continue;
16311                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16312                                 sizeof(struct mlx5_sub_policy_color_rule),
16313                                 0, SOCKET_ID_ANY);
16314                 if (!color_rule) {
16315                         DRV_LOG(ERR, "No memory to create color rule.");
16316                         goto err_exit;
16317                 }
16318                 tmp_rules[i] = color_rule;
16319                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16320                                   color_rule, next_port);
16321                 color_rule->src_port = priv->representor_id;
16322                 /* The attribute priority is not used here. */
16323                 attr.priority = i;
16324                 /* Create matchers for colors. */
16325                 svport_match = (i != RTE_COLOR_RED) ? match_src_port : false;
16326                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16327                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
16328                                 &attr, svport_match, NULL,
16329                                 &color_rule->matcher, &flow_err)) {
16330                         DRV_LOG(ERR, "Failed to create color%u matcher.", i);
16331                         goto err_exit;
16332                 }
16333                 /* Create flow, matching color. */
16334                 if (__flow_dv_create_policy_flow(dev,
16335                                 color_reg_c_idx, (enum rte_color)i,
16336                                 color_rule->matcher->matcher_object,
16337                                 acts[i].actions_n, acts[i].dv_actions,
16338                                 svport_match, NULL, &color_rule->rule,
16339                                 &attr)) {
16340                         DRV_LOG(ERR, "Failed to create color%u rule.", i);
16341                         goto err_exit;
16342                 }
16343         }
16344         return 0;
16345 err_exit:
16346         /* Rollback: clear all the policy rules created so far. */
16347         do {
16348                 color_rule = tmp_rules[i];
16349                 if (color_rule) {
16350                         if (color_rule->rule)
16351                                 mlx5_flow_os_destroy_flow(color_rule->rule);
16352                         if (color_rule->matcher) {
16353                                 struct mlx5_flow_tbl_data_entry *tbl =
16354                                         container_of(color_rule->matcher->tbl,
16355                                                      typeof(*tbl), tbl);
16356                                 mlx5_list_unregister(tbl->matchers,
16357                                                 &color_rule->matcher->entry);
16358                         }
16359                         TAILQ_REMOVE(&sub_policy->color_rules[i],
16360                                      color_rule, next_port);
16361                         mlx5_free(color_rule);
16362                 }
16363         } while (i--);
16364         return -1;
16365 }
16366
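/**
 * Prepare the per-color action lists and create the policy rules
 * for one domain of a meter policy.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Pointer to meter policy table.
 * @param[in] sub_policy
 *   Pointer to sub policy table.
 * @param[in] domain
 *   Meter domain to work on.
 *
 * @return
 *   0 on success, -1 otherwise.
 */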
16367 static int
16368 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
16369                         struct mlx5_flow_meter_policy *mtr_policy,
16370                         struct mlx5_flow_meter_sub_policy *sub_policy,
16371                         uint32_t domain)
16372 {
16373         struct mlx5_priv *priv = dev->data->dev_private;
16374         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16375         struct mlx5_flow_dv_tag_resource *tag;
16376         struct mlx5_flow_dv_port_id_action_resource *port_action;
16377         struct mlx5_hrxq *hrxq;
16378         struct mlx5_flow_meter_info *next_fm = NULL;
16379         struct mlx5_flow_meter_policy *next_policy;
16380         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16381         struct mlx5_flow_tbl_data_entry *tbl_data;
16382         struct rte_flow_error error;
16383         uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16384         uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16385         bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
16386         bool match_src_port = false;
16387         int i;
16388
16389         /* If RSS or Queue, no previous actions / rules are created. */
16390         for (i = 0; i < RTE_COLORS; i++) {
16391                 acts[i].actions_n = 0;
16392                 if (i == RTE_COLOR_RED) {
16393                         /* Only support drop on red. */
16394                         acts[i].dv_actions[0] =
16395                                 mtr_policy->dr_drop_action[domain];
16396                         acts[i].actions_n = 1;
16397                         continue;
16398                 }
16399                 if (i == RTE_COLOR_GREEN &&
16400                     mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
16401                         struct rte_flow_attr attr = {
16402                                 .transfer = transfer
16403                         };
16404
16405                         next_fm = mlx5_flow_meter_find(priv,
16406                                         mtr_policy->act_cnt[i].next_mtr_id,
16407                                         NULL);
16408                         if (!next_fm) {
16409                                 DRV_LOG(ERR,
16410                                         "Failed to get next hierarchy meter.");
16411                                 goto err_exit;
16412                         }
16413                         if (mlx5_flow_meter_attach(priv, next_fm,
16414                                                    &attr, &error)) {
16415                                 DRV_LOG(ERR, "%s", error.message);
16416                                 next_fm = NULL;
16417                                 goto err_exit;
16418                         }
16419                         /* Meter action must be the first for TX. */
16420                         if (mtr_first) {
16421                                 acts[i].dv_actions[acts[i].actions_n] =
16422                                         next_fm->meter_action;
16423                                 acts[i].actions_n++;
16424                         }
16425                 }
16426                 if (mtr_policy->act_cnt[i].rix_mark) {
16427                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
16428                                         mtr_policy->act_cnt[i].rix_mark);
16429                         if (!tag) {
16430                                 DRV_LOG(ERR, "Failed to find "
16431                                 "mark action for policy.");
16432                                 goto err_exit;
16433                         }
16434                         acts[i].dv_actions[acts[i].actions_n] = tag->action;
16435                         acts[i].actions_n++;
16436                 }
16437                 if (mtr_policy->act_cnt[i].modify_hdr) {
16438                         acts[i].dv_actions[acts[i].actions_n] =
16439                                 mtr_policy->act_cnt[i].modify_hdr->action;
16440                         acts[i].actions_n++;
16441                 }
16442                 if (mtr_policy->act_cnt[i].fate_action) {
16443                         switch (mtr_policy->act_cnt[i].fate_action) {
16444                         case MLX5_FLOW_FATE_PORT_ID:
16445                                 port_action = mlx5_ipool_get
16446                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
16447                                 mtr_policy->act_cnt[i].rix_port_id_action);
16448                                 if (!port_action) {
16449                                         DRV_LOG(ERR, "Failed to find "
16450                                                 "port action for policy.");
16451                                         goto err_exit;
16452                                 }
16453                                 acts[i].dv_actions[acts[i].actions_n] =
16454                                         port_action->action;
16455                                 acts[i].actions_n++;
16456                                 mtr_policy->dev = dev;
16457                                 match_src_port = true;
16458                                 break;
16459                         case MLX5_FLOW_FATE_DROP:
16460                         case MLX5_FLOW_FATE_JUMP:
16461                                 acts[i].dv_actions[acts[i].actions_n] =
16462                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
16463                                 acts[i].actions_n++;
16464                                 break;
16465                         case MLX5_FLOW_FATE_SHARED_RSS:
16466                         case MLX5_FLOW_FATE_QUEUE:
16467                                 hrxq = mlx5_ipool_get
16468                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
16469                                          sub_policy->rix_hrxq[i]);
16470                                 if (!hrxq) {
16471                                         DRV_LOG(ERR, "Failed to find "
16472                                                 "queue action for policy.");
16473                                         goto err_exit;
16474                                 }
16475                                 acts[i].dv_actions[acts[i].actions_n] =
16476                                         hrxq->action;
16477                                 acts[i].actions_n++;
16478                                 break;
16479                         case MLX5_FLOW_FATE_MTR:
16480                                 if (!next_fm) {
16481                                         DRV_LOG(ERR,
16482                                                 "No next hierarchy meter.");
16483                                         goto err_exit;
16484                                 }
16485                                 if (!mtr_first) {
16486                                         acts[i].dv_actions[acts[i].actions_n] =
16487                                                         next_fm->meter_action;
16488                                         acts[i].actions_n++;
16489                                 }
16490                                 if (mtr_policy->act_cnt[i].next_sub_policy) {
16491                                         next_sub_policy =
16492                                         mtr_policy->act_cnt[i].next_sub_policy;
16493                                 } else {
16494                                         next_policy =
16495                                                 mlx5_flow_meter_policy_find(dev,
16496                                                 next_fm->policy_id, NULL);
16497                                         MLX5_ASSERT(next_policy);
16498                                         next_sub_policy =
16499                                         next_policy->sub_policys[domain][0];
16500                                 }
16501                                 tbl_data =
16502                                         container_of(next_sub_policy->tbl_rsc,
16503                                         struct mlx5_flow_tbl_data_entry, tbl);
16504                                 acts[i].dv_actions[acts[i].actions_n++] =
16505                                                         tbl_data->jump.action;
16506                                 if (mtr_policy->act_cnt[i].modify_hdr)
16507                                         match_src_port = !!transfer;
16508                                 break;
16509                         default:
16510                                 /* Queue action: do nothing. */
16511                                 break;
16512                         }
16513                 }
16514         }
16515         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
16516                                 egress, transfer, match_src_port, acts)) {
16517                 DRV_LOG(ERR,
16518                         "Failed to create policy rules per domain.");
16519                 goto err_exit;
16520         }
16521         return 0;
16522 err_exit:
16523         if (next_fm)
16524                 mlx5_flow_meter_detach(priv, next_fm);
16525         return -1;
16526 }
16527
16528 /**
16529  * Create the policy rules.
16530  *
16531  * @param[in] dev
16532  *   Pointer to Ethernet device.
16533  * @param[in,out] mtr_policy
16534  *   Pointer to meter policy table.
16535  *
16536  * @return
16537  *   0 on success, -1 otherwise.
16538  */
16539 static int
16540 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
16541                              struct mlx5_flow_meter_policy *mtr_policy)
16542 {
16543         int i;
16544         uint16_t sub_policy_num;
16545
16546         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16547                 sub_policy_num = (mtr_policy->sub_policy_num >>
16548                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16549                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16550                 if (!sub_policy_num)
16551                         continue;
16552                 /* Prepare actions list and create policy rules. */
16553                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16554                         mtr_policy->sub_policys[i][0], i)) {
16555                         DRV_LOG(ERR, "Failed to create policy action "
16556                                 "list per domain.");
16557                         return -1;
16558                 }
16559         }
16560         return 0;
16561 }
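/*
 * Illustrative note: mtr_policy->sub_policy_num packs one counter per
 * domain into a single word, MLX5_MTR_SUB_POLICY_NUM_SHIFT bits each.
 * Reading and rewriting the counter of a given domain looks like:
 *
 *    n = (mtr_policy->sub_policy_num >>
 *         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
 *        MLX5_MTR_SUB_POLICY_NUM_MASK;
 *    mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
 *        (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
 *    mtr_policy->sub_policy_num |= (n & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
 *        (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
 */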
16562
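/**
 * Create the default policy table set for a single meter domain.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] domain
 *   Meter domain to work on.
 *
 * @return
 *   0 on success, -1 otherwise.
 */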
16563 static int
16564 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
16565 {
16566         struct mlx5_priv *priv = dev->data->dev_private;
16567         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16568         struct mlx5_flow_meter_def_policy *def_policy;
16569         struct mlx5_flow_tbl_resource *jump_tbl;
16570         struct mlx5_flow_tbl_data_entry *tbl_data;
16571         uint8_t egress, transfer;
16572         struct rte_flow_error error;
16573         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16574         int ret;
16575
16576         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16577         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16578         def_policy = mtrmng->def_policy[domain];
16579         if (!def_policy) {
16580                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
16581                         sizeof(struct mlx5_flow_meter_def_policy),
16582                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
16583                 if (!def_policy) {
16584                         DRV_LOG(ERR, "Failed to alloc default policy table.");
16585                         goto def_policy_error;
16586                 }
16587                 mtrmng->def_policy[domain] = def_policy;
16588                 /* Create the meter suffix table with SUFFIX level. */
16589                 jump_tbl = flow_dv_tbl_resource_get(dev,
16590                                 MLX5_FLOW_TABLE_LEVEL_METER,
16591                                 egress, transfer, false, NULL, 0,
16592                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16593                 if (!jump_tbl) {
16594                         DRV_LOG(ERR,
16595                                 "Failed to create meter suffix table.");
16596                         goto def_policy_error;
16597                 }
16598                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
16599                 tbl_data = container_of(jump_tbl,
16600                                         struct mlx5_flow_tbl_data_entry, tbl);
16601                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
16602                                                 tbl_data->jump.action;
16603                 acts[RTE_COLOR_GREEN].dv_actions[0] = tbl_data->jump.action;
16604                 acts[RTE_COLOR_GREEN].actions_n = 1;
16605                 /*
16606                  * YELLOW has the same default policy as GREEN does.
16607                  * G & Y share the same table and action. Getting the
16608                  * table resource a second time only increments its
16609                  * reference count, which the release stage decrements.
16610                  */
16611                 jump_tbl = flow_dv_tbl_resource_get(dev,
16612                                 MLX5_FLOW_TABLE_LEVEL_METER,
16613                                 egress, transfer, false, NULL, 0,
16614                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16615                 if (!jump_tbl) {
16616                         DRV_LOG(ERR,
16617                                 "Failed to get meter suffix table.");
16618                         goto def_policy_error;
16619                 }
16620                 def_policy->sub_policy.jump_tbl[RTE_COLOR_YELLOW] = jump_tbl;
16621                 tbl_data = container_of(jump_tbl,
16622                                         struct mlx5_flow_tbl_data_entry, tbl);
16623                 def_policy->dr_jump_action[RTE_COLOR_YELLOW] =
16624                                                 tbl_data->jump.action;
16625                 acts[RTE_COLOR_YELLOW].dv_actions[0] = tbl_data->jump.action;
16626                 acts[RTE_COLOR_YELLOW].actions_n = 1;
16627                 /* Create jump action to the drop table. */
16628                 if (!mtrmng->drop_tbl[domain]) {
16629                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
16630                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
16631                                  egress, transfer, false, NULL, 0,
16632                                  0, MLX5_MTR_TABLE_ID_DROP, &error);
16633                         if (!mtrmng->drop_tbl[domain]) {
16634                                 DRV_LOG(ERR, "Failed to create meter "
16635                                         "drop table for default policy.");
16636                                 goto def_policy_error;
16637                         }
16638                 }
16639                 /* All RED packets jump to the unique drop table. */
16640                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16641                                         struct mlx5_flow_tbl_data_entry, tbl);
16642                 def_policy->dr_jump_action[RTE_COLOR_RED] =
16643                                                 tbl_data->jump.action;
16644                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
16645                 acts[RTE_COLOR_RED].actions_n = 1;
16646                 /* Create default policy rules. */
16647                 ret = __flow_dv_create_domain_policy_rules(dev,
16648                                         &def_policy->sub_policy,
16649                                         egress, transfer, false, acts);
16650                 if (ret) {
16651                         DRV_LOG(ERR, "Failed to create default policy rules.");
16652                         goto def_policy_error;
16653                 }
16654         }
16655         return 0;
16656 def_policy_error:
16657         __flow_dv_destroy_domain_def_policy(dev,
16658                                             (enum mlx5_meter_domain)domain);
16659         return -1;
16660 }
16661
16662 /**
16663  * Create the default policy table set.
16664  *
16665  * @param[in] dev
16666  *   Pointer to Ethernet device.
16667  * @return
16668  *   0 on success, -1 otherwise.
16669  */
16670 static int
16671 flow_dv_create_def_policy(struct rte_eth_dev *dev)
16672 {
16673         struct mlx5_priv *priv = dev->data->dev_private;
16674         int i;
16675
16676         /* Create the default non-termination policy per domain. */
16677         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16678                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
16679                         continue;
16680                 if (__flow_dv_create_domain_def_policy(dev, i)) {
16681                         DRV_LOG(ERR, "Failed to create default policy");
16682                         /* Roll back default policies created for other domains. */
16683                         flow_dv_destroy_def_policy(dev);
16684                         return -1;
16685                 }
16686         }
16687         return 0;
16688 }
16689
16690 /**
16691  * Create the needed meter tables.
16692  * Lock free, the mutex must be acquired by the caller.
16693  *
16694  * @param[in] dev
16695  *   Pointer to Ethernet device.
16696  * @param[in] fm
16697  *   Meter information table.
16698  * @param[in] mtr_idx
16699  *   Meter index.
16700  * @param[in] domain_bitmap
16701  *   Domain bitmap.
16702  * @return
16703  *   0 on success, -1 otherwise.
16704  */
16705 static int
16706 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
16707                         struct mlx5_flow_meter_info *fm,
16708                         uint32_t mtr_idx,
16709                         uint8_t domain_bitmap)
16710 {
16711         struct mlx5_priv *priv = dev->data->dev_private;
16712         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16713         struct rte_flow_error error;
16714         struct mlx5_flow_tbl_data_entry *tbl_data;
16715         uint8_t egress, transfer;
16716         void *actions[METER_ACTIONS];
16717         int domain, ret, i;
16718         struct mlx5_flow_counter *cnt;
16719         struct mlx5_flow_dv_match_params value = {
16720                 .size = sizeof(value.buf),
16721         };
16722         struct mlx5_flow_dv_match_params matcher_para = {
16723                 .size = sizeof(matcher_para.buf),
16724         };
16725         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
16726                                                      0, &error);
16727         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
16728         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
16729         struct mlx5_list_entry *entry;
16730         struct mlx5_flow_dv_matcher matcher = {
16731                 .mask = {
16732                         .size = sizeof(matcher.mask.buf),
16733                 },
16734         };
16735         struct mlx5_flow_dv_matcher *drop_matcher;
16736         struct mlx5_flow_cb_ctx ctx = {
16737                 .error = &error,
16738                 .data = &matcher,
16739         };
16740         uint8_t misc_mask;
16741
16742         if (!priv->mtr_en || mtr_id_reg_c < 0) {
16743                 rte_errno = ENOTSUP;
16744                 return -1;
16745         }
16746         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
16747                 if (!(domain_bitmap & (1 << domain)) ||
16748                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
16749                         continue;
16750                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16751                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16752                 /* Create the drop table with METER DROP level. */
16753                 if (!mtrmng->drop_tbl[domain]) {
16754                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
16755                                         MLX5_FLOW_TABLE_LEVEL_METER,
16756                                         egress, transfer, false, NULL, 0,
16757                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
16758                         if (!mtrmng->drop_tbl[domain]) {
16759                                 DRV_LOG(ERR, "Failed to create meter drop table.");
16760                                 goto policy_error;
16761                         }
16762                 }
16763                 /* Create default matcher in drop table. */
16764                 matcher.tbl = mtrmng->drop_tbl[domain];
16765                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16766                                 struct mlx5_flow_tbl_data_entry, tbl);
16767                 if (!mtrmng->def_matcher[domain]) {
16768                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16769                                        (enum modify_reg)mtr_id_reg_c,
16770                                        0, 0);
16771                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
16772                         matcher.crc = rte_raw_cksum
16773                                         ((const void *)matcher.mask.buf,
16774                                         matcher.mask.size);
16775                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16776                         if (!entry) {
16777                                 DRV_LOG(ERR, "Failed to register meter "
16778                                 "drop default matcher.");
16779                                 goto policy_error;
16780                         }
16781                         mtrmng->def_matcher[domain] = container_of(entry,
16782                         struct mlx5_flow_dv_matcher, entry);
16783                 }
16784                 /* Create default rule in drop table. */
16785                 if (!mtrmng->def_rule[domain]) {
16786                         i = 0;
16787                         actions[i++] = priv->sh->dr_drop_action;
16788                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16789                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
16790                         misc_mask = flow_dv_matcher_enable(value.buf);
16791                         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16792                         ret = mlx5_flow_os_create_flow
16793                                 (mtrmng->def_matcher[domain]->matcher_object,
16794                                 (void *)&value, i, actions,
16795                                 &mtrmng->def_rule[domain]);
16796                         if (ret) {
16797                                 DRV_LOG(ERR, "Failed to create meter "
16798                                 "default drop rule for drop table.");
16799                                 goto policy_error;
16800                         }
16801                 }
16802                 if (!fm->drop_cnt)
16803                         continue;
16804                 MLX5_ASSERT(mtrmng->max_mtr_bits);
16805                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
16806                         /* Create matchers for Drop. */
16807                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16808                                         (enum modify_reg)mtr_id_reg_c, 0,
16809                                         (mtr_id_mask << mtr_id_offset));
16810                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
16811                         matcher.crc = rte_raw_cksum
16812                                         ((const void *)matcher.mask.buf,
16813                                         matcher.mask.size);
16814                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16815                         if (!entry) {
16816                                 DRV_LOG(ERR,
16817                                 "Failed to register meter drop matcher.");
16818                                 goto policy_error;
16819                         }
16820                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
16821                                 container_of(entry, struct mlx5_flow_dv_matcher,
16822                                              entry);
16823                 }
16824                 drop_matcher =
16825                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
16826                 /* Create drop rule, matching meter_id only. */
16827                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16828                                 (enum modify_reg)mtr_id_reg_c,
16829                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
16830                 i = 0;
16831                 cnt = flow_dv_counter_get_by_idx(dev,
16832                                         fm->drop_cnt, NULL);
16833                 actions[i++] = cnt->action;
16834                 actions[i++] = priv->sh->dr_drop_action;
16835                 misc_mask = flow_dv_matcher_enable(value.buf);
16836                 __flow_dv_adjust_buf_size(&value.size, misc_mask);
16837                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
16838                                                (void *)&value, i, actions,
16839                                                &fm->drop_rule[domain]);
16840                 if (ret) {
16841                         DRV_LOG(ERR, "Failed to create meter "
16842                                 "drop rule for drop table.");
16843                         goto policy_error;
16844                 }
16845         }
16846         return 0;
16847 policy_error:
16848         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16849                 if (fm->drop_rule[i]) {
16850                         claim_zero(mlx5_flow_os_destroy_flow
16851                                 (fm->drop_rule[i]));
16852                         fm->drop_rule[i] = NULL;
16853                 }
16854         }
16855         return -1;
16856 }
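/*
 * Illustrative note on the meter register layout used above: with
 * priv->mtr_reg_share set, the color and the meter ID share one REG_C
 * register, the color in the low MLX5_MTR_COLOR_BITS bits and the
 * meter ID right above it:
 *
 *    bits [31 .. offset + max_mtr_bits]          unused
 *    bits [offset + max_mtr_bits - 1 .. offset]  meter ID
 *    bits [offset - 1 .. 0]                      color
 *    (offset = MLX5_MTR_COLOR_BITS when the register is shared)
 *
 * Hence the drop matcher masks (mtr_id_mask << mtr_id_offset) and the
 * drop rule matches the value (mtr_idx << mtr_id_offset).
 */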
16857
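/**
 * Get or create an RSS sub policy matching the given queue layout.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Pointer to meter policy table.
 * @param[in] rss_desc
 *   Pointer to the RSS descriptor per color.
 * @param[in] next_sub_policy
 *   Pointer to the next sub policy in the hierarchy, if any.
 * @param[out] is_reuse
 *   Set to true if an existing sub policy is reused.
 *
 * @return
 *   Pointer to the sub policy on success, NULL otherwise.
 */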
16858 static struct mlx5_flow_meter_sub_policy *
16859 __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
16860                 struct mlx5_flow_meter_policy *mtr_policy,
16861                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
16862                 struct mlx5_flow_meter_sub_policy *next_sub_policy,
16863                 bool *is_reuse)
16864 {
16865         struct mlx5_priv *priv = dev->data->dev_private;
16866         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16867         uint32_t sub_policy_idx = 0;
16868         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
16869         uint32_t i, j;
16870         struct mlx5_hrxq *hrxq;
16871         struct mlx5_flow_handle dh;
16872         struct mlx5_meter_policy_action_container *act_cnt;
16873         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16874         uint16_t sub_policy_num;
16875         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
16876
16877         MLX5_ASSERT(wks);
16878         rte_spinlock_lock(&mtr_policy->sl);
16879         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16880                 if (!rss_desc[i])
16881                         continue;
16882                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
16883                 if (!hrxq_idx[i]) {
16884                         rte_spinlock_unlock(&mtr_policy->sl);
16885                         return NULL;
16886                 }
16887         }
16888         sub_policy_num = (mtr_policy->sub_policy_num >>
16889                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16890                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16891         for (j = 0; j < sub_policy_num; j++) {
16892                 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16893                         if (rss_desc[i] &&
16894                             hrxq_idx[i] !=
16895                             mtr_policy->sub_policys[domain][j]->rix_hrxq[i])
16896                                 break;
16897                 }
16898                 if (i >= MLX5_MTR_RTE_COLORS) {
16899                         /*
16900                          * Found the sub policy table with
16901                          * the same queue per color.
16902                          */
16903                         rte_spinlock_unlock(&mtr_policy->sl);
16904                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16905                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16906                         *is_reuse = true;
16907                         return mtr_policy->sub_policys[domain][j];
16908                 }
16909         }
16910         /* Create sub policy. */
16911         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
16912                 /* Reuse the first pre-allocated sub_policy. */
16913                 sub_policy = mtr_policy->sub_policys[domain][0];
16914                 sub_policy_idx = sub_policy->idx;
16915         } else {
16916                 sub_policy = mlx5_ipool_zmalloc
16917                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16918                                  &sub_policy_idx);
16919                 if (!sub_policy ||
16920                     sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
16921                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16922                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16923                         goto rss_sub_policy_error;
16924                 }
16925                 sub_policy->idx = sub_policy_idx;
16926                 sub_policy->main_policy = mtr_policy;
16927         }
16928         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16929                 if (!rss_desc[i])
16930                         continue;
16931                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
16932                 if (mtr_policy->is_hierarchy) {
16933                         act_cnt = &mtr_policy->act_cnt[i];
16934                         act_cnt->next_sub_policy = next_sub_policy;
16935                         mlx5_hrxq_release(dev, hrxq_idx[i]);
16936                 } else {
16937                         /*
16938                          * Overwrite the last action, changing it
16939                          * from an RSS action to a Queue action.
16940                          */
16941                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
16942                                               hrxq_idx[i]);
16943                         if (!hrxq) {
16944                                 DRV_LOG(ERR, "Failed to get policy hrxq");
16945                                 goto rss_sub_policy_error;
16946                         }
16947                         act_cnt = &mtr_policy->act_cnt[i];
16948                         if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16949                                 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16950                                 if (act_cnt->rix_mark)
16951                                         wks->mark = 1;
16952                                 dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16953                                 dh.rix_hrxq = hrxq_idx[i];
16954                                 flow_drv_rxq_flags_set(dev, &dh);
16955                         }
16956                 }
16957         }
16958         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16959                                                sub_policy, domain)) {
16960                 DRV_LOG(ERR, "Failed to create policy "
16961                         "rules for ingress domain.");
16962                 goto rss_sub_policy_error;
16963         }
16964         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16965                 i = (mtr_policy->sub_policy_num >>
16966                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16967                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16968                 if (i >= MLX5_MTR_RSS_MAX_SUB_POLICY) {
16969                         DRV_LOG(ERR, "No free sub-policy slot.");
16970                         goto rss_sub_policy_error;
16971                 }
16972                 mtr_policy->sub_policys[domain][i] = sub_policy;
16973                 i++;
16974                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16975                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16976                 mtr_policy->sub_policy_num |=
16977                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16978                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16979         }
16980         rte_spinlock_unlock(&mtr_policy->sl);
16981         *is_reuse = false;
16982         return sub_policy;
16983 rss_sub_policy_error:
16984         if (sub_policy) {
16985                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16986                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16987                         i = (mtr_policy->sub_policy_num >>
16988                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16989                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16990                         mtr_policy->sub_policys[domain][i] = NULL;
16991                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16992                                         sub_policy->idx);
16993                 }
16994         }
16995         rte_spinlock_unlock(&mtr_policy->sl);
16996         return NULL;
16997 }
16998
16999 /**
17000  * Find the policy table for prefix table with RSS.
17001  *
17002  * @param[in] dev
17003  *   Pointer to Ethernet device.
17004  * @param[in] mtr_policy
17005  *   Pointer to meter policy table.
17006  * @param[in] rss_desc
17007  *   Pointer to rss_desc
17008  * @return
17009  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
17010  */
17011 static struct mlx5_flow_meter_sub_policy *
17012 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
17013                 struct mlx5_flow_meter_policy *mtr_policy,
17014                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
17015 {
17016         struct mlx5_priv *priv = dev->data->dev_private;
17017         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17018         struct mlx5_flow_meter_info *next_fm;
17019         struct mlx5_flow_meter_policy *next_policy;
17020         struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
17021         struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
17022         struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
17023         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17024         bool reuse_sub_policy;
17025         uint32_t i = 0;
17026         uint32_t j = 0;
17027
17028         while (true) {
17029                 /* Walk the hierarchy to collect all policies in it. */
17030                 policies[i++] = mtr_policy;
17031                 if (!mtr_policy->is_hierarchy)
17032                         break;
17033                 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
17034                         DRV_LOG(ERR, "Exceed max meter number in hierarchy.");
17035                         return NULL;
17036                 }
17037                 next_fm = mlx5_flow_meter_find(priv,
17038                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
17039                 if (!next_fm) {
17040                         DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
17041                         return NULL;
17042                 }
17043                 next_policy =
17044                         mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
17045                                                     NULL);
17046                 MLX5_ASSERT(next_policy);
17047                 mtr_policy = next_policy;
17048         }
17049         while (i) {
17050                 /*
17051                  * From the last policy to the first one in the hierarchy,
17052                  * create or get the sub policy for each of them.
17053                  */
17054                 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
17055                                                         policies[--i],
17056                                                         rss_desc,
17057                                                         next_sub_policy,
17058                                                         &reuse_sub_policy);
17059                 if (!sub_policy) {
17060                         DRV_LOG(ERR, "Failed to get the sub policy.");
17061                         goto err_exit;
17062                 }
17063                 if (!reuse_sub_policy)
17064                         sub_policies[j++] = sub_policy;
17065                 next_sub_policy = sub_policy;
17066         }
17067         return sub_policy;
17068 err_exit:
17069         while (j) {
17070                 uint16_t sub_policy_num;
17071
17072                 sub_policy = sub_policies[--j];
17073                 mtr_policy = sub_policy->main_policy;
17074                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
17075                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
17076                         sub_policy_num = (mtr_policy->sub_policy_num >>
17077                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17078                                 MLX5_MTR_SUB_POLICY_NUM_MASK;
17079                         mtr_policy->sub_policys[domain][sub_policy_num - 1] =
17080                                                                         NULL;
17081                         sub_policy_num--;
17082                         mtr_policy->sub_policy_num &=
17083                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17084                                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17085                         mtr_policy->sub_policy_num |=
17086                         (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17087                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17088                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17089                                         sub_policy->idx);
17090                 }
17091         }
17092         return NULL;
17093 }
17094
17095 /**
17096  * Create the sub policy tag rule for all meters in hierarchy.
17097  *
17098  * @param[in] dev
17099  *   Pointer to Ethernet device.
17100  * @param[in] fm
17101  *   Meter information table.
17102  * @param[in] src_port
17103  *   The src port this extra rule should use.
17104  * @param[in] item
17105  *   The src port match item.
17106  * @param[out] error
17107  *   Perform verbose error reporting if not NULL.
17108  * @return
17109  *   0 on success, a negative errno value otherwise and rte_errno is set.
17110  */
17111 static int
17112 flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
17113                                 struct mlx5_flow_meter_info *fm,
17114                                 int32_t src_port,
17115                                 const struct rte_flow_item *item,
17116                                 struct rte_flow_error *error)
17117 {
17118         struct mlx5_priv *priv = dev->data->dev_private;
17119         struct mlx5_flow_meter_policy *mtr_policy;
17120         struct mlx5_flow_meter_sub_policy *sub_policy;
17121         struct mlx5_flow_meter_info *next_fm = NULL;
17122         struct mlx5_flow_meter_policy *next_policy;
17123         struct mlx5_flow_meter_sub_policy *next_sub_policy;
17124         struct mlx5_flow_tbl_data_entry *tbl_data;
17125         struct mlx5_sub_policy_color_rule *color_rule;
17126         struct mlx5_meter_policy_acts acts;
17127         uint32_t color_reg_c_idx;
17128         bool mtr_first = src_port != UINT16_MAX;
17129         struct rte_flow_attr attr = {
17130                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
17131                 .priority = 0,
17132                 .ingress = 0,
17133                 .egress = 0,
17134                 .transfer = 1,
17135                 .reserved = 0,
17136         };
17137         uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
17138         int i;
17139
17140         mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17141         MLX5_ASSERT(mtr_policy);
17142         if (!mtr_policy->is_hierarchy)
17143                 return 0;
17144         next_fm = mlx5_flow_meter_find(priv,
17145                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
17146         if (!next_fm) {
17147                 return rte_flow_error_set(error, EINVAL,
17148                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
17149                                 "Failed to find next meter in hierarchy.");
17150         }
17151         if (!next_fm->drop_cnt)
17152                 goto exit;
17153         color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
17154         sub_policy = mtr_policy->sub_policys[domain][0];
17155         for (i = 0; i < RTE_COLORS; i++) {
17156                 bool rule_exist = false;
17157                 struct mlx5_meter_policy_action_container *act_cnt;
17158
17159                 if (i >= RTE_COLOR_YELLOW)
17160                         break;
17161                 TAILQ_FOREACH(color_rule,
17162                               &sub_policy->color_rules[i], next_port)
17163                         if (color_rule->src_port == src_port) {
17164                                 rule_exist = true;
17165                                 break;
17166                         }
17167                 if (rule_exist)
17168                         continue;
17169                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
17170                                 sizeof(struct mlx5_sub_policy_color_rule),
17171                                 0, SOCKET_ID_ANY);
17172                 if (!color_rule)
17173                         return rte_flow_error_set(error, ENOMEM,
17174                                 RTE_FLOW_ERROR_TYPE_ACTION,
17175                                 NULL, "No memory to create tag color rule.");
17176                 color_rule->src_port = src_port;
17177                 attr.priority = i;
17178                 next_policy = mlx5_flow_meter_policy_find(dev,
17179                                                 next_fm->policy_id, NULL);
17180                 MLX5_ASSERT(next_policy);
17181                 next_sub_policy = next_policy->sub_policys[domain][0];
17182                 tbl_data = container_of(next_sub_policy->tbl_rsc,
17183                                         struct mlx5_flow_tbl_data_entry, tbl);
17184                 act_cnt = &mtr_policy->act_cnt[i];
17185                 if (mtr_first) {
17186                         acts.dv_actions[0] = next_fm->meter_action;
17187                         acts.dv_actions[1] = act_cnt->modify_hdr->action;
17188                 } else {
17189                         acts.dv_actions[0] = act_cnt->modify_hdr->action;
17190                         acts.dv_actions[1] = next_fm->meter_action;
17191                 }
17192                 acts.dv_actions[2] = tbl_data->jump.action;
17193                 acts.actions_n = 3;
17194                 if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
17195                         next_fm = NULL;
17196                         goto err_exit;
17197                 }
17198                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
17199                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
17200                                 &attr, true, item,
17201                                 &color_rule->matcher, error)) {
17202                         rte_flow_error_set(error, errno,
17203                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17204                                 "Failed to create hierarchy meter matcher.");
17205                         goto err_exit;
17206                 }
17207                 if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
17208                                         (enum rte_color)i,
17209                                         color_rule->matcher->matcher_object,
17210                                         acts.actions_n, acts.dv_actions,
17211                                         true, item,
17212                                         &color_rule->rule, &attr)) {
17213                         rte_flow_error_set(error, errno,
17214                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17215                                 "Failed to create hierarchy meter rule.");
17216                         goto err_exit;
17217                 }
17218                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
17219                                   color_rule, next_port);
17220         }
17221 exit:
17222         /*
17223          * Recursive call to iterate all meters in the hierarchy and
17224          * create the needed rules.
17225          */
17226         return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
17227                                                 src_port, item, error);
17228 err_exit:
17229         if (color_rule) {
17230                 if (color_rule->rule)
17231                         mlx5_flow_os_destroy_flow(color_rule->rule);
17232                 if (color_rule->matcher) {
17233                         struct mlx5_flow_tbl_data_entry *tbl =
17234                                 container_of(color_rule->matcher->tbl,
17235                                                 typeof(*tbl), tbl);
17236                         mlx5_list_unregister(tbl->matchers,
17237                                                 &color_rule->matcher->entry);
17238                 }
17239                 mlx5_free(color_rule);
17240         }
17241         if (next_fm)
17242                 mlx5_flow_meter_detach(priv, next_fm);
17243         return -rte_errno;
17244 }
17245
17246 /**
17247  * Destroy the sub policy table with RX queue.
17248  *
17249  * @param[in] dev
17250  *   Pointer to Ethernet device.
17251  * @param[in] mtr_policy
17252  *   Pointer to meter policy table.
17253  */
17254 static void
17255 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
17256                                     struct mlx5_flow_meter_policy *mtr_policy)
17257 {
17258         struct mlx5_priv *priv = dev->data->dev_private;
17259         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17260         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17261         uint32_t i, j;
17262         uint16_t sub_policy_num, new_policy_num;
17263
17264         rte_spinlock_lock(&mtr_policy->sl);
17265         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17266                 switch (mtr_policy->act_cnt[i].fate_action) {
17267                 case MLX5_FLOW_FATE_SHARED_RSS:
17268                         sub_policy_num = (mtr_policy->sub_policy_num >>
17269                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17270                         MLX5_MTR_SUB_POLICY_NUM_MASK;
17271                         new_policy_num = sub_policy_num;
17272                         for (j = 0; j < sub_policy_num; j++) {
17273                                 sub_policy =
17274                                         mtr_policy->sub_policys[domain][j];
17275                                 if (sub_policy) {
17276                                         __flow_dv_destroy_sub_policy_rules
17277                                                         (dev, sub_policy);
17278                                         if (sub_policy !=
17279                                             mtr_policy->sub_policys[domain][0]) {
17280                                                 mtr_policy->sub_policys[domain][j] =
17281                                                                 NULL;
17282                                                 mlx5_ipool_free(priv->sh->ipool
17283                                                         [MLX5_IPOOL_MTR_POLICY],
17284                                                         sub_policy->idx);
17285                                                 new_policy_num--;
17286                                         }
17287                                 }
17288                         }
17289                         if (new_policy_num != sub_policy_num) {
17290                                 mtr_policy->sub_policy_num &=
17291                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17292                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17293                                 mtr_policy->sub_policy_num |=
17294                                 (new_policy_num &
17295                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17296                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17297                         }
17298                         break;
17299                 case MLX5_FLOW_FATE_QUEUE:
17300                         sub_policy = mtr_policy->sub_policys[domain][0];
17301                         __flow_dv_destroy_sub_policy_rules(dev,
17302                                                            sub_policy);
17303                         break;
17304                 default:
17305                         /* Other actions without a queue, nothing to do. */
17306                         break;
17307                 }
17308         }
17309         rte_spinlock_unlock(&mtr_policy->sl);
17310 }
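
/*
 * A minimal sketch of the per-domain packing used by 'sub_policy_num'
 * above: each domain owns MLX5_MTR_SUB_POLICY_NUM_SHIFT bits that hold its
 * sub-policy count. The helper names are illustrative, not driver API.
 */
static inline uint16_t
example_sub_policy_num_get(uint32_t packed, uint32_t domain)
{
        return (packed >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
               MLX5_MTR_SUB_POLICY_NUM_MASK;
}

static inline uint32_t
example_sub_policy_num_set(uint32_t packed, uint32_t domain, uint16_t num)
{
        /* Clear the domain's field, then store the new count in place. */
        packed &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
                    (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
        packed |= ((uint32_t)num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
                  (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
        return packed;
}
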
17311 /**
17312  * Check whether the DR drop action is supported on the root table or not.
17313  *
17314  * Create a simple flow with DR drop action on root table to validate
17315  * if DR drop action on root table is supported or not.
17316  *
17317  * @param[in] dev
17318  *   Pointer to rte_eth_dev structure.
17319  *
17320  * @return
17321  *   0 on success, a negative errno value otherwise and rte_errno is set.
17322  */
17323 int
17324 mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)
17325 {
17326         struct mlx5_priv *priv = dev->data->dev_private;
17327         struct mlx5_dev_ctx_shared *sh = priv->sh;
17328         struct mlx5_flow_dv_match_params mask = {
17329                 .size = sizeof(mask.buf),
17330         };
17331         struct mlx5_flow_dv_match_params value = {
17332                 .size = sizeof(value.buf),
17333         };
17334         struct mlx5dv_flow_matcher_attr dv_attr = {
17335                 .type = IBV_FLOW_ATTR_NORMAL,
17336                 .priority = 0,
17337                 .match_criteria_enable = 0,
17338                 .match_mask = (void *)&mask,
17339         };
17340         struct mlx5_flow_tbl_resource *tbl = NULL;
17341         void *matcher = NULL;
17342         void *flow = NULL;
17343         int ret = -1;
17344
17345         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
17346                                         0, 0, 0, NULL);
17347         if (!tbl)
17348                 goto err;
17349         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17350         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17351         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17352                                                tbl->obj, &matcher);
17353         if (ret)
17354                 goto err;
17355         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17356         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17357                                        &sh->dr_drop_action, &flow);
17358 err:
17359         /*
17360          * If the DR drop action is not supported on the root table, flow
17361          * creation fails with EOPNOTSUPP or EPROTONOSUPPORT.
17362          */
17363         if (!flow) {
17364                 if (matcher &&
17365                     (errno == EPROTONOSUPPORT || errno == EOPNOTSUPP))
17366                         DRV_LOG(INFO, "DR drop action is not supported in root table.");
17367                 else
17368                         DRV_LOG(ERR, "Unexpected error in DR drop action support detection");
17369                 ret = -1;
17370         } else {
17371                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17372         }
17373         if (matcher)
17374                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17375         if (tbl)
17376                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17377         return ret;
17378 }
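
/*
 * Illustrative sketch (not part of the driver): a caller probing at device
 * start could cache the result in a flag of its own; 'dr_root_drop_en' here
 * is a hypothetical variable, not an existing field.
 */
static void
example_probe_dr_drop(struct rte_eth_dev *dev, bool *dr_root_drop_en)
{
        /* 0 means the DR drop action works on the root table. */
        *dr_root_drop_en = (mlx5_flow_discover_dr_action_support(dev) == 0);
}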
17379
17380 /**
17381  * Validate the batch counter support in root table.
17382  *
17383  * Create a simple flow with invalid counter and drop action on root table to
17384  * validate if batch counter with offset on root table is supported or not.
17385  *
17386  * @param[in] dev
17387  *   Pointer to rte_eth_dev structure.
17388  *
17389  * @return
17390  *   0 on success, a negative errno value otherwise and rte_errno is set.
17391  */
17392 int
17393 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
17394 {
17395         struct mlx5_priv *priv = dev->data->dev_private;
17396         struct mlx5_dev_ctx_shared *sh = priv->sh;
17397         struct mlx5_flow_dv_match_params mask = {
17398                 .size = sizeof(mask.buf),
17399         };
17400         struct mlx5_flow_dv_match_params value = {
17401                 .size = sizeof(value.buf),
17402         };
17403         struct mlx5dv_flow_matcher_attr dv_attr = {
17404                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
17405                 .priority = 0,
17406                 .match_criteria_enable = 0,
17407                 .match_mask = (void *)&mask,
17408         };
17409         void *actions[2] = { 0 };
17410         struct mlx5_flow_tbl_resource *tbl = NULL;
17411         struct mlx5_devx_obj *dcs = NULL;
17412         void *matcher = NULL;
17413         void *flow = NULL;
17414         int ret = -1;
17415
17416         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
17417                                         0, 0, 0, NULL);
17418         if (!tbl)
17419                 goto err;
17420         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
17421         if (!dcs)
17422                 goto err;
17423         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
17424                                                     &actions[0]);
17425         if (ret)
17426                 goto err;
17427         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17428         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17429         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17430                                                tbl->obj, &matcher);
17431         if (ret)
17432                 goto err;
17433         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17434         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17435                                        actions, &flow);
17436 err:
17437         /*
17438          * If a batch counter with offset is not supported, the driver does
17439          * not validate the invalid offset value and flow creation succeeds,
17440          * which means batch counters are not supported on the root table.
17441          *
17442          * Otherwise, if flow creation fails, the counter offset is supported.
17443          */
17444         if (flow) {
17445                 DRV_LOG(INFO, "Batch counter is not supported in root "
17446                               "table. Switch to fallback mode.");
17447                 rte_errno = ENOTSUP;
17448                 ret = -rte_errno;
17449                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17450         } else {
17451                 /* Check the matcher to make sure validation failed at flow create. */
17452                 if (!matcher || errno != EINVAL)
17453                         DRV_LOG(ERR, "Unexpected error in counter offset "
17454                                      "support detection");
17455                 ret = 0;
17456         }
17457         if (actions[0])
17458                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
17459         if (matcher)
17460                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17461         if (tbl)
17462                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17463         if (dcs)
17464                 claim_zero(mlx5_devx_cmd_destroy(dcs));
17465         return ret;
17466 }
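
/*
 * Illustrative sketch: the inverted probe logic above maps naturally to a
 * fallback decision. 'counters_fallback' is a hypothetical output flag.
 */
static void
example_select_counter_mode(struct rte_eth_dev *dev, bool *counters_fallback)
{
        /* 0 means batch counters with offset work on the root table. */
        *counters_fallback =
                (mlx5_flow_dv_discover_counter_offset_support(dev) != 0);
}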
17467
17468 /**
17469  * Query a devx counter.
17470  *
17471  * @param[in] dev
17472  *   Pointer to the Ethernet device structure.
17473  * @param[in] counter
17474  *   Index to the flow counter.
17475  * @param[in] clear
17476  *   Set to clear the counter statistics.
17477  * @param[out] pkts
17478  *   The statistics value of packets.
17479  * @param[out] bytes
17480  *   The statistics value of bytes.
17481  *
17482  * @return
17483  *   0 on success, -1 otherwise.
17484  */
17485 static int
17486 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
17487                       uint64_t *pkts, uint64_t *bytes)
17488 {
17489         struct mlx5_priv *priv = dev->data->dev_private;
17490         struct mlx5_flow_counter *cnt;
17491         uint64_t inn_pkts, inn_bytes;
17492         int ret;
17493
17494         if (!priv->sh->devx)
17495                 return -1;
17496
17497         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
17498         if (ret)
17499                 return -1;
17500         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
17501         *pkts = inn_pkts - cnt->hits;
17502         *bytes = inn_bytes - cnt->bytes;
17503         if (clear) {
17504                 cnt->hits = inn_pkts;
17505                 cnt->bytes = inn_bytes;
17506         }
17507         return 0;
17508 }
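
/*
 * A minimal sketch of the 'clear' semantics above: reading with clear set
 * advances the baseline (cnt->hits/bytes), so two consecutive reads yield
 * per-interval deltas. 'dev' and 'counter' are assumed to be valid.
 */
static void
example_counter_interval_delta(struct rte_eth_dev *dev, uint32_t counter)
{
        uint64_t pkts, bytes;

        if (flow_dv_counter_query(dev, counter, true, &pkts, &bytes) == 0)
                DRV_LOG(INFO, "interval delta: %llu packets, %llu bytes",
                        (unsigned long long)pkts, (unsigned long long)bytes);
}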
17509
17510 /**
17511  * Get aged-out flows.
17512  *
17513  * @param[in] dev
17514  *   Pointer to the Ethernet device structure.
17515  * @param[in] context
17516  *   The address of an array of pointers to the aged-out flows contexts.
17517  * @param[in] nb_contexts
17518  *   The length of the context array.
17519  * @param[out] error
17520  *   Perform verbose error reporting if not NULL. Initialized in case of
17521  *   error only.
17522  *
17523  * @return
17524  *   The number of contexts retrieved on success, a negative errno value
17525  *   otherwise. If @p nb_contexts is 0, the total number of aged contexts
17526  *   is returned; otherwise, the number of aged flows reported in the
17527  *   context array is returned.
17528  */
17530 static int
17531 flow_dv_get_aged_flows(struct rte_eth_dev *dev,
17532                     void **context,
17533                     uint32_t nb_contexts,
17534                     struct rte_flow_error *error)
17535 {
17536         struct mlx5_priv *priv = dev->data->dev_private;
17537         struct mlx5_age_info *age_info;
17538         struct mlx5_age_param *age_param;
17539         struct mlx5_flow_counter *counter;
17540         struct mlx5_aso_age_action *act;
17541         int nb_flows = 0;
17542
17543         if (nb_contexts && !context)
17544                 return rte_flow_error_set(error, EINVAL,
17545                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17546                                           NULL, "empty context");
17547         age_info = GET_PORT_AGE_INFO(priv);
17548         rte_spinlock_lock(&age_info->aged_sl);
17549         LIST_FOREACH(act, &age_info->aged_aso, next) {
17550                 nb_flows++;
17551                 if (nb_contexts) {
17552                         context[nb_flows - 1] =
17553                                                 act->age_params.context;
17554                         if (!(--nb_contexts))
17555                                 break;
17556                 }
17557         }
17558         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
17559                 nb_flows++;
17560                 if (nb_contexts) {
17561                         age_param = MLX5_CNT_TO_AGE(counter);
17562                         context[nb_flows - 1] = age_param->context;
17563                         if (!(--nb_contexts))
17564                                 break;
17565                 }
17566         }
17567         rte_spinlock_unlock(&age_info->aged_sl);
17568         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
17569         return nb_flows;
17570 }
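
/*
 * Application-side sketch of the usual two-step pattern over the public
 * rte_flow_get_aged_flows() wrapper that dispatches here (illustrative
 * only; assumes a started port and the standard C library).
 */
static int
example_drain_aged_flows(uint16_t port_id)
{
        struct rte_flow_error error;
        void **contexts;
        int n;

        /* A first call with nb_contexts == 0 returns the total count. */
        n = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
        if (n <= 0)
                return n;
        contexts = calloc(n, sizeof(*contexts));
        if (contexts == NULL)
                return -ENOMEM;
        n = rte_flow_get_aged_flows(port_id, contexts, n, &error);
        /* ... destroy or refresh the flow behind each context here ... */
        free(contexts);
        return n;
}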
17571
17572 /*
17573  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
17574  */
17575 static uint32_t
17576 flow_dv_counter_allocate(struct rte_eth_dev *dev)
17577 {
17578         return flow_dv_counter_alloc(dev, 0);
17579 }
17580
17581 /**
17582  * Validate indirect action.
17583  * Dispatcher for action type specific validation.
17584  *
17585  * @param[in] dev
17586  *   Pointer to the Ethernet device structure.
17587  * @param[in] conf
17588  *   Indirect action configuration.
17589  * @param[in] action
17590  *   The indirect action object to validate.
17591  * @param[out] err
17592  *   Perform verbose error reporting if not NULL. Initialized in case of
17593  *   error only.
17594  *
17595  * @return
17596  *   0 on success, otherwise negative errno value.
17597  */
17598 static int
17599 flow_dv_action_validate(struct rte_eth_dev *dev,
17600                         const struct rte_flow_indir_action_conf *conf,
17601                         const struct rte_flow_action *action,
17602                         struct rte_flow_error *err)
17603 {
17604         struct mlx5_priv *priv = dev->data->dev_private;
17605
17606         RTE_SET_USED(conf);
17607         switch (action->type) {
17608         case RTE_FLOW_ACTION_TYPE_RSS:
17609                 /*
17610                  * priv->obj_ops is set according to driver capabilities:
17611                  * devx_obj_ops when the DevX capabilities are sufficient,
17612                  * ibv_obj_ops otherwise. ibv_obj_ops does not support the
17613                  * ind_table_modify operation, so the indirect RSS action
17614                  * cannot be used in that case.
17615                  */
17617                 if (priv->obj_ops.ind_table_modify == NULL)
17618                         return rte_flow_error_set
17619                                         (err, ENOTSUP,
17620                                          RTE_FLOW_ERROR_TYPE_ACTION,
17621                                          NULL,
17622                                          "Indirect RSS action not supported");
17623                 return mlx5_validate_action_rss(dev, action, err);
17624         case RTE_FLOW_ACTION_TYPE_AGE:
17625                 if (!priv->sh->aso_age_mng)
17626                         return rte_flow_error_set(err, ENOTSUP,
17627                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17628                                                 NULL,
17629                                                 "Indirect age action not supported");
17630                 return flow_dv_validate_action_age(0, action, dev, err);
17631         case RTE_FLOW_ACTION_TYPE_COUNT:
17632                 return flow_dv_validate_action_count(dev, true, 0, err);
17633         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
17634                 if (!priv->sh->ct_aso_en)
17635                         return rte_flow_error_set(err, ENOTSUP,
17636                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17637                                         "ASO CT is not supported");
17638                 return mlx5_validate_action_ct(dev, action->conf, err);
17639         default:
17640                 return rte_flow_error_set(err, ENOTSUP,
17641                                           RTE_FLOW_ERROR_TYPE_ACTION,
17642                                           NULL,
17643                                           "action type not supported");
17644         }
17645 }
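
/*
 * Application-side sketch: the dispatcher above runs when an indirect
 * action is created through the generic API. Illustrative only; 'rss_conf'
 * is assumed to be prepared by the caller.
 */
static struct rte_flow_action_handle *
example_create_indirect_rss(uint16_t port_id,
                            const struct rte_flow_action_rss *rss_conf,
                            struct rte_flow_error *error)
{
        const struct rte_flow_indir_action_conf conf = {
                .ingress = 1,
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = rss_conf,
        };

        return rte_flow_action_handle_create(port_id, &conf, &action, error);
}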
17646
17647 /*
17648  * Check if the RSS configurations for colors of a meter policy match
17649  * each other, except the queues.
17650  *
17651  * @param[in] r1
17652  *   Pointer to the first RSS flow action.
17653  * @param[in] r2
17654  *   Pointer to the second RSS flow action.
17655  *
17656  * @return
17657  *   0 on match, 1 on conflict.
17658  */
17659 static inline int
17660 flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1,
17661                                const struct rte_flow_action_rss *r2)
17662 {
17663         if (r1 == NULL || r2 == NULL)
17664                 return 0;
17665         if (!(r1->level <= 1 && r2->level <= 1) &&
17666             !(r1->level > 1 && r2->level > 1))
17667                 return 1;
17668         if (r1->types != r2->types &&
17669             !((r1->types == 0 || r1->types == RTE_ETH_RSS_IP) &&
17670               (r2->types == 0 || r2->types == RTE_ETH_RSS_IP)))
17671                 return 1;
17672         if (r1->key || r2->key) {
17673                 const void *key1 = r1->key ? r1->key : rss_hash_default_key;
17674                 const void *key2 = r2->key ? r2->key : rss_hash_default_key;
17675
17676                 if (memcmp(key1, key2, MLX5_RSS_HASH_KEY_LEN))
17677                         return 1;
17678         }
17679         return 0;
17680 }
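
/*
 * A minimal sketch of what the comparison above accepts: two per-color RSS
 * actions that differ only in their queue lists are considered matching,
 * so one sub-policy table layout can serve both colors.
 */
static int
example_rss_compare(void)
{
        static const uint16_t q_green[] = { 0, 1 };
        static const uint16_t q_yellow[] = { 2, 3 };
        const struct rte_flow_action_rss green = {
                .types = RTE_ETH_RSS_IP,
                .queue = q_green,
                .queue_num = 2,
        };
        const struct rte_flow_action_rss yellow = {
                .types = RTE_ETH_RSS_IP,
                .queue = q_yellow,
                .queue_num = 2,
        };

        return flow_dv_mtr_policy_rss_compare(&green, &yellow); /* 0: match */
}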
17681
17682 /**
17683  * Validate the meter hierarchy chain for meter policy.
17684  *
17685  * @param[in] dev
17686  *   Pointer to the Ethernet device structure.
17687  * @param[in] meter_id
17688  *   Meter id.
17689  * @param[in] action_flags
17690  *   Holds the actions detected until now.
17691  * @param[out] is_rss
17692  *   Is RSS or not.
17693  * @param[out] hierarchy_domain
17694  *   The domain bitmap for hierarchy policy.
17695  * @param[out] error
17696  *   Perform verbose error reporting if not NULL. Initialized in case of
17697  *   error only.
17698  *
17699  * @return
17700  *   0 on success, otherwise negative errno value with error set.
17701  */
17702 static int
17703 flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
17704                                   uint32_t meter_id,
17705                                   uint64_t action_flags,
17706                                   bool *is_rss,
17707                                   uint8_t *hierarchy_domain,
17708                                   struct rte_mtr_error *error)
17709 {
17710         struct mlx5_priv *priv = dev->data->dev_private;
17711         struct mlx5_flow_meter_info *fm;
17712         struct mlx5_flow_meter_policy *policy;
17713         uint8_t cnt = 1;
17714
17715         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
17716                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17717                 return -rte_mtr_error_set(error, EINVAL,
17718                                         RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
17719                                         NULL,
17720                                         "Multiple fate actions not supported.");
17721         *hierarchy_domain = 0;
17722         while (true) {
17723                 fm = mlx5_flow_meter_find(priv, meter_id, NULL);
17724                 if (!fm)
17725                         return -rte_mtr_error_set(error, EINVAL,
17726                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17727                                         "Meter not found in meter hierarchy.");
17728                 if (fm->def_policy)
17729                         return -rte_mtr_error_set(error, EINVAL,
17730                                         RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17731                         "Non-termination meter not supported in hierarchy.");
17732                 policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17733                 MLX5_ASSERT(policy);
17734                 /*
17735                  * Only inherit the supported domains of the first meter in
17736                  * the hierarchy.
17737                  * One meter supports at least one domain.
17738                  */
17739                 if (!*hierarchy_domain) {
17740                         if (policy->transfer)
17741                                 *hierarchy_domain |=
17742                                                 MLX5_MTR_DOMAIN_TRANSFER_BIT;
17743                         if (policy->ingress)
17744                                 *hierarchy_domain |=
17745                                                 MLX5_MTR_DOMAIN_INGRESS_BIT;
17746                         if (policy->egress)
17747                                 *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
17748                 }
17749                 if (!policy->is_hierarchy) {
17750                         *is_rss = policy->is_rss;
17751                         break;
17752                 }
17753                 meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
17754                 if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
17755                         return -rte_mtr_error_set(error, EINVAL,
17756                                         RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
17757                                         "Exceeded the max number of meters in hierarchy.");
17758         }
17759         return 0;
17760 }
17761
17762 /**
17763  * Validate meter policy actions.
17764  * Dispatcher for action type specific validation.
17765  *
17766  * @param[in] dev
17767  *   Pointer to the Ethernet device structure.
17768  * @param[in] actions
17769  *   Array of meter policy actions to validate, one entry per color.
17770  * @param[in] attr
17771  *   Attributes of flow to determine steering domain.
17772  * @param[out] is_rss
17773  *   Set to true when an RSS action is used.
17774  * @param[out] domain_bitmap
17775  *   Bitmap of the steering domains the policy can support.
17776  * @param[out] policy_mode
17777  *   Computed policy mode (default, green-only, yellow-only or all colors).
17778  * @param[out] error
17779  *   Perform verbose error reporting if not NULL. Initialized in case of
17780  *   error only.
17775  *
17776  * @return
17777  *   0 on success, otherwise negative errno value.
17778  */
17779 static int
17780 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
17781                         const struct rte_flow_action *actions[RTE_COLORS],
17782                         struct rte_flow_attr *attr,
17783                         bool *is_rss,
17784                         uint8_t *domain_bitmap,
17785                         uint8_t *policy_mode,
17786                         struct rte_mtr_error *error)
17787 {
17788         struct mlx5_priv *priv = dev->data->dev_private;
17789         struct mlx5_dev_config *dev_conf = &priv->config;
17790         const struct rte_flow_action *act;
17791         uint64_t action_flags[RTE_COLORS] = {0};
17792         int actions_n;
17793         int i, ret;
17794         struct rte_flow_error flow_err;
17795         uint8_t domain_color[RTE_COLORS] = {0};
17796         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
17797         uint8_t hierarchy_domain = 0;
17798         const struct rte_flow_action_meter *mtr;
17799         bool def_green = false;
17800         bool def_yellow = false;
17801         const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};
17802
17803         if (!priv->config.dv_esw_en)
17804                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17805         *domain_bitmap = def_domain;
17806         /* Red color could only support DROP action. */
17807         if (!actions[RTE_COLOR_RED] ||
17808             actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
17809                 return -rte_mtr_error_set(error, ENOTSUP,
17810                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17811                                 NULL, "Red color only supports drop action.");
17812         /*
17813          * Check default policy actions:
17814          * Green / Yellow: no action, Red: drop action
17815          * Either G or Y will trigger default policy actions to be created.
17816          */
17817         if (!actions[RTE_COLOR_GREEN] ||
17818             actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)
17819                 def_green = true;
17820         if (!actions[RTE_COLOR_YELLOW] ||
17821             actions[RTE_COLOR_YELLOW]->type == RTE_FLOW_ACTION_TYPE_END)
17822                 def_yellow = true;
17823         if (def_green && def_yellow) {
17824                 *policy_mode = MLX5_MTR_POLICY_MODE_DEF;
17825                 return 0;
17826         } else if (!def_green && def_yellow) {
17827                 *policy_mode = MLX5_MTR_POLICY_MODE_OG;
17828         } else if (def_green && !def_yellow) {
17829                 *policy_mode = MLX5_MTR_POLICY_MODE_OY;
17830         } else {
17831                 *policy_mode = MLX5_MTR_POLICY_MODE_ALL;
17832         }
17833         /* Set to empty string in case of NULL pointer access by user. */
17834         flow_err.message = "";
17835         for (i = 0; i < RTE_COLORS; i++) {
17836                 act = actions[i];
17837                 for (action_flags[i] = 0, actions_n = 0;
17838                      act && act->type != RTE_FLOW_ACTION_TYPE_END;
17839                      act++) {
17840                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
17841                                 return -rte_mtr_error_set(error, ENOTSUP,
17842                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17843                                           NULL, "too many actions");
17844                         switch (act->type) {
17845                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
17846                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
17847                                 if (!priv->config.dv_esw_en)
17848                                         return -rte_mtr_error_set(error,
17849                                         ENOTSUP,
17850                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17851                                         NULL, "PORT action validation"
17852                                         " failed: E-Switch is disabled");
17853                                 ret = flow_dv_validate_action_port_id(dev,
17854                                                 action_flags[i],
17855                                                 act, attr, &flow_err);
17856                                 if (ret)
17857                                         return -rte_mtr_error_set(error,
17858                                         ENOTSUP,
17859                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17860                                         NULL, flow_err.message ?
17861                                         flow_err.message :
17862                                         "PORT action validation failed");
17863                                 ++actions_n;
17864                                 action_flags[i] |= MLX5_FLOW_ACTION_PORT_ID;
17865                                 break;
17866                         case RTE_FLOW_ACTION_TYPE_MARK:
17867                                 ret = flow_dv_validate_action_mark(dev, act,
17868                                                            action_flags[i],
17869                                                            attr, &flow_err);
17870                                 if (ret < 0)
17871                                         return -rte_mtr_error_set(error,
17872                                         ENOTSUP,
17873                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17874                                         NULL, flow_err.message ?
17875                                         flow_err.message :
17876                                         "Mark action validation failed");
17877                                 if (dev_conf->dv_xmeta_en !=
17878                                         MLX5_XMETA_MODE_LEGACY)
17879                                         return -rte_mtr_error_set(error,
17880                                         ENOTSUP,
17881                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17882                                         NULL, "Extended MARK action is "
17883                                         "not supported. Please use the "
17884                                         "default policy for the meter.");
17885                                 action_flags[i] |= MLX5_FLOW_ACTION_MARK;
17886                                 ++actions_n;
17887                                 break;
17888                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
17889                                 ret = flow_dv_validate_action_set_tag(dev,
17890                                                         act, action_flags[i],
17891                                                         attr, &flow_err);
17892                                 if (ret)
17893                                         return -rte_mtr_error_set(error,
17894                                         ENOTSUP,
17895                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17896                                         NULL, flow_err.message ?
17897                                         flow_err.message :
17898                                         "Set tag action validation failed");
17899                                 action_flags[i] |= MLX5_FLOW_ACTION_SET_TAG;
17900                                 ++actions_n;
17901                                 break;
17902                         case RTE_FLOW_ACTION_TYPE_DROP:
17903                                 ret = mlx5_flow_validate_action_drop
17904                                         (action_flags[i], attr, &flow_err);
17905                                 if (ret < 0)
17906                                         return -rte_mtr_error_set(error,
17907                                         ENOTSUP,
17908                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17909                                         NULL, flow_err.message ?
17910                                         flow_err.message :
17911                                         "Drop action validation failed");
17912                                 action_flags[i] |= MLX5_FLOW_ACTION_DROP;
17913                                 ++actions_n;
17914                                 break;
17915                         case RTE_FLOW_ACTION_TYPE_QUEUE:
17916                                 /*
17917                                  * Check whether extensive
17918                                  * metadata feature is engaged.
17919                                  */
17920                                 if (dev_conf->dv_flow_en &&
17921                                     (dev_conf->dv_xmeta_en !=
17922                                      MLX5_XMETA_MODE_LEGACY) &&
17923                                     mlx5_flow_ext_mreg_supported(dev))
17924                                         return -rte_mtr_error_set(error,
17925                                           ENOTSUP,
17926                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17927                                           NULL, "Queue action with meta "
17928                                           "is not supported. Please use the "
17929                                           "default policy for the meter.");
17930                                 ret = mlx5_flow_validate_action_queue(act,
17931                                                         action_flags[i], dev,
17932                                                         attr, &flow_err);
17933                                 if (ret < 0)
17934                                         return -rte_mtr_error_set(error,
17935                                           ENOTSUP,
17936                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17937                                           NULL, flow_err.message ?
17938                                           flow_err.message :
17939                                           "Queue action validation failed");
17940                                 action_flags[i] |= MLX5_FLOW_ACTION_QUEUE;
17941                                 ++actions_n;
17942                                 break;
17943                         case RTE_FLOW_ACTION_TYPE_RSS:
17944                                 if (dev_conf->dv_flow_en &&
17945                                     (dev_conf->dv_xmeta_en !=
17946                                      MLX5_XMETA_MODE_LEGACY) &&
17947                                     mlx5_flow_ext_mreg_supported(dev))
17948                                         return -rte_mtr_error_set(error,
17949                                           ENOTSUP,
17950                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17951                                           NULL, "RSS action with meta "
17952                                           "is not supported. Please use the "
17953                                           "default policy for the meter.");
17954                                 ret = mlx5_validate_action_rss(dev, act,
17955                                                                &flow_err);
17956                                 if (ret < 0)
17957                                         return -rte_mtr_error_set(error,
17958                                           ENOTSUP,
17959                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17960                                           NULL, flow_err.message ?
17961                                           flow_err.message :
17962                                           "RSS action validation failed");
17963                                 action_flags[i] |= MLX5_FLOW_ACTION_RSS;
17964                                 ++actions_n;
17965                                 /* Either G or Y will set the RSS. */
17966                                 rss_color[i] = act->conf;
17967                                 break;
17968                         case RTE_FLOW_ACTION_TYPE_JUMP:
17969                                 ret = flow_dv_validate_action_jump(dev,
17970                                         NULL, act, action_flags[i],
17971                                         attr, true, &flow_err);
17972                                 if (ret)
17973                                         return -rte_mtr_error_set(error,
17974                                           ENOTSUP,
17975                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17976                                           NULL, flow_err.message ?
17977                                           flow_err.message :
17978                                           "Jump action validation failed");
17979                                 ++actions_n;
17980                                 action_flags[i] |= MLX5_FLOW_ACTION_JUMP;
17981                                 break;
17982                         /*
17983                          * Only the last meter in the hierarchy can support
17984                          * YELLOW color steering, so no other meter may appear
17985                          * in this meter policy's actions list.
17986                          */
17987                         case RTE_FLOW_ACTION_TYPE_METER:
17988                                 if (i != RTE_COLOR_GREEN)
17989                                         return -rte_mtr_error_set(error,
17990                                                 ENOTSUP,
17991                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17992                                                 NULL,
17993                                                 "Meter hierarchy only supports GREEN color.");
17994                                 if (*policy_mode != MLX5_MTR_POLICY_MODE_OG)
17995                                         return -rte_mtr_error_set(error,
17996                                                 ENOTSUP,
17997                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17998                                                 NULL,
17999                                                 "No yellow policy should be provided in meter hierarchy.");
18000                                 mtr = act->conf;
18001                                 ret = flow_dv_validate_policy_mtr_hierarchy(dev,
18002                                                         mtr->mtr_id,
18003                                                         action_flags[i],
18004                                                         is_rss,
18005                                                         &hierarchy_domain,
18006                                                         error);
18007                                 if (ret)
18008                                         return ret;
18009                                 ++actions_n;
18010                                 action_flags[i] |=
18011                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
18012                                 break;
18013                         default:
18014                                 return -rte_mtr_error_set(error, ENOTSUP,
18015                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18016                                         NULL,
18017                                         "Unsupported action in meter policy");
18018                         }
18019                 }
18020                 if (action_flags[i] & MLX5_FLOW_ACTION_PORT_ID) {
18021                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
18022                 } else if ((action_flags[i] &
18023                           (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
18024                           (action_flags[i] & MLX5_FLOW_ACTION_MARK)) {
18025                         /*
18026                          * Only MLX5_XMETA_MODE_LEGACY is supported here,
18027                          * so the MARK action is limited to the ingress domain.
18028                          */
18029                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
18030                 } else {
18031                         domain_color[i] = def_domain;
18032                         if (action_flags[i] &&
18033                             !(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18034                                 domain_color[i] &=
18035                                 ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
18036                 }
18037                 if (action_flags[i] &
18038                     MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
18039                         domain_color[i] &= hierarchy_domain;
18040                 /*
18041                  * Non-termination actions only support the NIC Tx domain.
18042                  * The adjustment must be skipped when there is no action
18043                  * or only END is provided; the default domain bit-mask is
18044                  * then kept to find the minimal intersection, and the
18045                  * action flags checking is skipped as well.
18046                  */
18047                 if ((def_green && i == RTE_COLOR_GREEN) ||
18048                     (def_yellow && i == RTE_COLOR_YELLOW))
18049                         continue;
18050                 /*
18051                  * Validate the drop action mutual exclusion
18052                  * with other actions. Drop action is mutually-exclusive
18053                  * with any other action, except for Count action.
18054                  */
18055                 if ((action_flags[i] & MLX5_FLOW_ACTION_DROP) &&
18056                     (action_flags[i] & ~MLX5_FLOW_ACTION_DROP)) {
18057                         return -rte_mtr_error_set(error, ENOTSUP,
18058                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18059                                 NULL, "Drop action is mutually-exclusive "
18060                                 "with any other action");
18061                 }
18062                 /* E-Switch has a few restrictions on the items and actions used. */
18063                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
18064                         if (!mlx5_flow_ext_mreg_supported(dev) &&
18065                             action_flags[i] & MLX5_FLOW_ACTION_MARK)
18066                                 return -rte_mtr_error_set(error, ENOTSUP,
18067                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18068                                         NULL, "unsupported action MARK");
18069                         if (action_flags[i] & MLX5_FLOW_ACTION_QUEUE)
18070                                 return -rte_mtr_error_set(error, ENOTSUP,
18071                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18072                                         NULL, "unsupported action QUEUE");
18073                         if (action_flags[i] & MLX5_FLOW_ACTION_RSS)
18074                                 return -rte_mtr_error_set(error, ENOTSUP,
18075                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18076                                         NULL, "unsupported action RSS");
18077                         if (!(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18078                                 return -rte_mtr_error_set(error, ENOTSUP,
18079                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18080                                         NULL, "no fate action is found");
18081                 } else {
18082                         if (!(action_flags[i] & MLX5_FLOW_FATE_ACTIONS) &&
18083                             (domain_color[i] & MLX5_MTR_DOMAIN_INGRESS_BIT)) {
18084                                 if ((domain_color[i] &
18085                                      MLX5_MTR_DOMAIN_EGRESS_BIT))
18086                                         domain_color[i] =
18087                                                 MLX5_MTR_DOMAIN_EGRESS_BIT;
18088                                 else
18089                                         return -rte_mtr_error_set(error,
18090                                                 ENOTSUP,
18091                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18092                                                 NULL,
18093                                                 "no fate action is found");
18094                         }
18095                 }
18096         }
18097         /* If both colors have RSS, the attributes should be the same. */
18098         if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN],
18099                                            rss_color[RTE_COLOR_YELLOW]))
18100                 return -rte_mtr_error_set(error, EINVAL,
18101                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18102                                           NULL, "policy RSS attr conflict");
18103         if (rss_color[RTE_COLOR_GREEN] || rss_color[RTE_COLOR_YELLOW])
18104                 *is_rss = true;
18105         /* "domain_color[C]" is non-zero for each color, default is ALL. */
18106         if (!def_green && !def_yellow &&
18107             domain_color[RTE_COLOR_GREEN] != domain_color[RTE_COLOR_YELLOW] &&
18108             !(action_flags[RTE_COLOR_GREEN] & MLX5_FLOW_ACTION_DROP) &&
18109             !(action_flags[RTE_COLOR_YELLOW] & MLX5_FLOW_ACTION_DROP))
18110                 return -rte_mtr_error_set(error, EINVAL,
18111                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18112                                           NULL, "policy domains conflict");
18113         /*
18114          * At least one color policy is listed in the actions; the
18115          * supported domains are the intersection of the per-color domains.
18116          */
18117         *domain_bitmap = domain_color[RTE_COLOR_GREEN] &
18118                          domain_color[RTE_COLOR_YELLOW];
18119         return 0;
18120 }
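
/*
 * Application-side sketch of a policy shape the validation above accepts:
 * green -> queue 0, yellow -> default, red -> drop. Illustrative only;
 * 'port_id' and 'policy_id' are assumed to be chosen by the caller.
 */
static int
example_add_mtr_policy(uint16_t port_id, uint32_t policy_id,
                       struct rte_mtr_error *error)
{
        const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action green_acts[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        const struct rte_flow_action red_acts[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_mtr_meter_policy_params params = {
                .actions = {
                        [RTE_COLOR_GREEN] = green_acts,
                        [RTE_COLOR_RED] = red_acts,
                },
        };

        return rte_mtr_meter_policy_add(port_id, policy_id, &params, error);
}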
18121
18122 static int
18123 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
18124 {
18125         struct mlx5_priv *priv = dev->data->dev_private;
18126         int ret = 0;
18127
18128         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
18129                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
18130                                                 flags);
18131                 if (ret != 0)
18132                         return ret;
18133         }
18134         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
18135                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
18136                 if (ret != 0)
18137                         return ret;
18138         }
18139         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
18140                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
18141                 if (ret != 0)
18142                         return ret;
18143         }
18144         return 0;
18145 }
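
/*
 * Application-side sketch: the sync callback above is reached through the
 * PMD-private API in rte_pmd_mlx5.h (illustrative; assumes an mlx5 port).
 */
static int
example_sync_all_domains(uint16_t port_id)
{
        const uint32_t domains = MLX5_DOMAIN_BIT_NIC_RX |
                                 MLX5_DOMAIN_BIT_NIC_TX |
                                 MLX5_DOMAIN_BIT_FDB;

        return rte_pmd_mlx5_sync_flow(port_id, domains);
}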
18146
18147 /**
18148  * Discover the number of available flow priorities
18149  * by trying to create a flow with the highest priority value
18150  * for each possible number.
18151  *
18152  * @param[in] dev
18153  *   Ethernet device.
18154  * @param[in] vprio
18155  *   List of possible number of available priorities.
18156  * @param[in] vprio_n
18157  *   Size of @p vprio array.
18158  * @return
18159  *   On success, number of available flow priorities.
18160  *   On failure, a negative errno-style code and rte_errno is set.
18161  */
18162 static int
18163 flow_dv_discover_priorities(struct rte_eth_dev *dev,
18164                             const uint16_t *vprio, int vprio_n)
18165 {
18166         struct mlx5_priv *priv = dev->data->dev_private;
18167         struct mlx5_indexed_pool *pool = priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW];
18168         struct rte_flow_item_eth eth;
18169         struct rte_flow_item item = {
18170                 .type = RTE_FLOW_ITEM_TYPE_ETH,
18171                 .spec = &eth,
18172                 .mask = &eth,
18173         };
18174         struct mlx5_flow_dv_matcher matcher = {
18175                 .mask = {
18176                         .size = sizeof(matcher.mask.buf),
18177                 },
18178         };
18179         union mlx5_flow_tbl_key tbl_key;
18180         struct mlx5_flow flow;
18181         void *action;
18182         struct rte_flow_error error;
18183         uint8_t misc_mask;
18184         int i, err, ret = -ENOTSUP;
18185
18186         /*
18187          * Prepare a flow with a catch-all pattern and a drop action.
18188          * Use drop queue, because shared drop action may be unavailable.
18189          */
18190         action = priv->drop_queue.hrxq->action;
18191         if (action == NULL) {
18192                 DRV_LOG(ERR, "Priority discovery requires a drop action");
18193                 rte_errno = ENOTSUP;
18194                 return -rte_errno;
18195         }
18196         memset(&flow, 0, sizeof(flow));
18197         flow.handle = mlx5_ipool_zmalloc(pool, &flow.handle_idx);
18198         if (flow.handle == NULL) {
18199                 DRV_LOG(ERR, "Cannot create flow handle");
18200                 rte_errno = ENOMEM;
18201                 return -rte_errno;
18202         }
18203         flow.ingress = true;
18204         flow.dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
18205         flow.dv.actions[0] = action;
18206         flow.dv.actions_n = 1;
18207         memset(&eth, 0, sizeof(eth));
18208         flow_dv_translate_item_eth(matcher.mask.buf, flow.dv.value.buf,
18209                                    &item, /* inner */ false, /* group */ 0);
18210         matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);
18211         for (i = 0; i < vprio_n; i++) {
18212                 /* Configure the next proposed maximum priority. */
18213                 matcher.priority = vprio[i] - 1;
18214                 memset(&tbl_key, 0, sizeof(tbl_key));
18215                 err = flow_dv_matcher_register(dev, &matcher, &tbl_key, &flow,
18216                                                /* tunnel */ NULL,
18217                                                /* group */ 0,
18218                                                &error);
18219                 if (err != 0) {
18220                         /* This action is pure SW and must always succeed. */
18221                         DRV_LOG(ERR, "Cannot register matcher");
18222                         ret = -rte_errno;
18223                         break;
18224                 }
18225                 /* Try to apply the flow to HW. */
18226                 misc_mask = flow_dv_matcher_enable(flow.dv.value.buf);
18227                 __flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask);
18228                 err = mlx5_flow_os_create_flow
18229                                 (flow.handle->dvh.matcher->matcher_object,
18230                                  (void *)&flow.dv.value, flow.dv.actions_n,
18231                                  flow.dv.actions, &flow.handle->drv_flow);
18232                 if (err == 0) {
18233                         claim_zero(mlx5_flow_os_destroy_flow
18234                                                 (flow.handle->drv_flow));
18235                         flow.handle->drv_flow = NULL;
18236                 }
18237                 claim_zero(flow_dv_matcher_release(dev, flow.handle));
18238                 if (err != 0)
18239                         break;
18240                 ret = vprio[i];
18241         }
18242         mlx5_ipool_free(pool, flow.handle_idx);
18243         /* Set rte_errno if no expected priority value matched. */
18244         if (ret < 0)
18245                 rte_errno = -ret;
18246         return ret;
18247 }
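
/*
 * A minimal sketch of how the probe above might be driven: try candidate
 * priority counts in ascending order and keep the last one the hardware
 * accepts. The candidate values here are illustrative only.
 */
static int
example_probe_priorities(struct rte_eth_dev *dev)
{
        static const uint16_t vprio[] = { 8, 16 };

        return flow_dv_discover_priorities(dev, vprio, RTE_DIM(vprio));
}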
18248
18249 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
18250         .validate = flow_dv_validate,
18251         .prepare = flow_dv_prepare,
18252         .translate = flow_dv_translate,
18253         .apply = flow_dv_apply,
18254         .remove = flow_dv_remove,
18255         .destroy = flow_dv_destroy,
18256         .query = flow_dv_query,
18257         .create_mtr_tbls = flow_dv_create_mtr_tbls,
18258         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
18259         .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
18260         .create_meter = flow_dv_mtr_alloc,
18261         .free_meter = flow_dv_aso_mtr_release_to_pool,
18262         .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
18263         .create_mtr_acts = flow_dv_create_mtr_policy_acts,
18264         .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
18265         .create_policy_rules = flow_dv_create_policy_rules,
18266         .destroy_policy_rules = flow_dv_destroy_policy_rules,
18267         .create_def_policy = flow_dv_create_def_policy,
18268         .destroy_def_policy = flow_dv_destroy_def_policy,
18269         .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
18270         .meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
18271         .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
18272         .counter_alloc = flow_dv_counter_allocate,
18273         .counter_free = flow_dv_counter_free,
18274         .counter_query = flow_dv_counter_query,
18275         .get_aged_flows = flow_dv_get_aged_flows,
18276         .action_validate = flow_dv_action_validate,
18277         .action_create = flow_dv_action_create,
18278         .action_destroy = flow_dv_action_destroy,
18279         .action_update = flow_dv_action_update,
18280         .action_query = flow_dv_action_query,
18281         .sync_domain = flow_dv_sync_domain,
18282         .discover_priorities = flow_dv_discover_priorities,
18283         .item_create = flow_dv_item_create,
18284         .item_release = flow_dv_item_release,
18285 };
18286
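/*
 * A minimal sketch of how the ops table above is consumed: the generic
 * flow layer selects a backend and dispatches through these pointers.
 * The counter_alloc hook is used here because its signature is visible
 * above; the dispatch itself is illustrative.
 */
static uint32_t
example_dispatch_counter_alloc(struct rte_eth_dev *dev)
{
        const struct mlx5_flow_driver_ops *fops = &mlx5_flow_dv_drv_ops;

        return fops->counter_alloc(dev);
}
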
18287 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
18288