/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>
#include <rte_tailq.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
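
/*
 * The 802.1Q tag control information (TCI) word is laid out as
 * PCP (bits 15:13) | DEI (bit 12) | VID (bits 11:0), which is where
 * the PCP shift of 13 and the 12-bit VID mask above come from.
 */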

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};
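
/*
 * The anonymous bit-field struct and "attr" overlay each other, so
 * flow_dv_attr_init() can clear all of the flags at once with
 * "attr->attr = 0" when it encounters a tunnel item.
 */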

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

static int16_t
flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (priv->pci_dev == NULL)
                return 0;
        switch (priv->pci_dev->id.device_id) {
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
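                /*
                 * On BlueField NICs the E-Switch manager is the embedded
                 * CPU physical function (ECPF), whose vport id is 0xfffe.
                 */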
                return (int16_t)0xfffe;
        default:
                return 0;
        }
}

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layer cases other than tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is the suffix
         * flow and the layer flags were set by the prefix flow. Use the
         * layer flags from the prefix flow, since the suffix flow may not
         * carry the user-defined items after the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/*
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static inline int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

static inline struct mlx5_hlist *
flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
                     const char *name, uint32_t size, bool direct_key,
                     bool lcores_share, void *ctx,
                     mlx5_list_create_cb cb_create,
                     mlx5_list_match_cb cb_match,
                     mlx5_list_remove_cb cb_remove,
                     mlx5_list_clone_cb cb_clone,
                     mlx5_list_clone_free_cb cb_clone_free)
{
        struct mlx5_hlist *hl;
        struct mlx5_hlist *expected = NULL;
        char s[MLX5_NAME_SIZE];

        hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        if (likely(hl))
                return hl;
        snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
        hl = mlx5_hlist_create(s, size, direct_key, lcores_share,
                        ctx, cb_create, cb_match, cb_remove, cb_clone,
                        cb_clone_free);
        if (!hl) {
                DRV_LOG(ERR, "%s hash creation failed", name);
                rte_errno = ENOMEM;
                return NULL;
        }
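        /*
         * Publish the new hash list atomically; if another thread raced
         * ahead and installed its own list, destroy ours and return the
         * one that won.
         */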
        if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
                                         __ATOMIC_SEQ_CST,
                                         __ATOMIC_SEQ_CST)) {
                mlx5_hlist_destroy(hl);
                hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        }
        return hl;
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;
        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
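                /* e.g. PCP 5 becomes 5 << 13 = 0xa000 in the TCI. */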
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   converted field in host endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}
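
/*
 * Example: fetching 3 bytes from {0x12, 0x34, 0x56} reads the first two
 * bytes as big-endian 0x1234, then appends the third byte:
 * (0x1234 << 8) | 0x56 = 0x123456.
 */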

/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t carry_b = 0;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                uint32_t size_b;
                uint32_t off_b;
                uint32_t mask;
                uint32_t data;
                bool next_field = true;
                bool next_dcopy = true;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask) + carry_b;
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
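                /*
                 * e.g. mask 0x00fff000 with carry_b == 0: off_b = 12
                 * (lowest set bit) and size_b = 32 - 12 - 8 (leading
                 * zeroes) = 12 bits.
                 */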
                MLX5_ASSERT(size_b);
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
                                0 : size_b,
                };
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        /*
                         * Destination field overflow. Copy leftovers of
                         * a source field to the next destination field.
                         */
                        carry_b = 0;
                        if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
                            dcopy->size != 0) {
                                actions[i].length =
                                        dcopy->size * CHAR_BIT - dcopy->offset;
                                carry_b = actions[i].length;
                                next_field = false;
                        }
                        /*
                         * Not enough bits in a source field to fill a
                         * destination field. Switch to the next source.
                         */
                        if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
                            (size_b == field->size * CHAR_BIT - off_b)) {
                                actions[i].length =
                                        field->size * CHAR_BIT - off_b;
                                dcopy->offset += actions[i].length;
                                next_dcopy = false;
                        }
                        if (next_dcopy)
                                ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (next_field)
                        ++field;
                ++i;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
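                /*
                 * In modulo-2^32 arithmetic X * (2^32 - 1) == -X, so the
                 * truncated product below is the two's complement of the
                 * decrement amount.
                 */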
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
                .offset = conf->offset,
                .length = conf->length,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->sh->config.dv_xmeta_en !=
                            MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        mask = rte_cpu_to_be_32(reg_c0 >> reg_dst.offset);
                } else {
                        reg_dst.offset = 0;
                        mask = rte_cpu_to_be_32(reg_c0);
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

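                /*
                 * e.g. if reg_c[0] reports mask 0xffff0000, shl_c0 is 16
                 * and the mark value is shifted into the upper half-word.
                 */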
                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t mask = rte_cpu_to_be_32(conf->mask);
        uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1372         /*
1373          * Even though the DSCP bits offset of IPv6 is not byte aligned,
1374          * rdma-core only accept the DSCP bits byte aligned start from
1375          * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1376          * bits in IPv6 case as rdma-core requires byte aligned value.
1377          */
1378         ipv6.hdr.vtc_flow = conf->dscp;
1379         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1380         item.spec = &ipv6;
1381         item.mask = &ipv6_mask;
1382         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1383                                              MLX5_MODIFICATION_TYPE_SET, error);
1384 }
1385
1386 static int
1387 mlx5_flow_item_field_width(struct rte_eth_dev *dev,
1388                            enum rte_flow_field_id field, int inherit,
1389                            const struct rte_flow_attr *attr,
1390                            struct rte_flow_error *error)
1391 {
1392         struct mlx5_priv *priv = dev->data->dev_private;
1393
1394         switch (field) {
1395         case RTE_FLOW_FIELD_START:
1396                 return 32;
1397         case RTE_FLOW_FIELD_MAC_DST:
1398         case RTE_FLOW_FIELD_MAC_SRC:
1399                 return 48;
1400         case RTE_FLOW_FIELD_VLAN_TYPE:
1401                 return 16;
1402         case RTE_FLOW_FIELD_VLAN_ID:
1403                 return 12;
1404         case RTE_FLOW_FIELD_MAC_TYPE:
1405                 return 16;
1406         case RTE_FLOW_FIELD_IPV4_DSCP:
1407                 return 6;
1408         case RTE_FLOW_FIELD_IPV4_TTL:
1409                 return 8;
1410         case RTE_FLOW_FIELD_IPV4_SRC:
1411         case RTE_FLOW_FIELD_IPV4_DST:
1412                 return 32;
1413         case RTE_FLOW_FIELD_IPV6_DSCP:
1414                 return 6;
1415         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1416                 return 8;
1417         case RTE_FLOW_FIELD_IPV6_SRC:
1418         case RTE_FLOW_FIELD_IPV6_DST:
1419                 return 128;
1420         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1421         case RTE_FLOW_FIELD_TCP_PORT_DST:
1422                 return 16;
1423         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1424         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1425                 return 32;
1426         case RTE_FLOW_FIELD_TCP_FLAGS:
1427                 return 9;
1428         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1429         case RTE_FLOW_FIELD_UDP_PORT_DST:
1430                 return 16;
1431         case RTE_FLOW_FIELD_VXLAN_VNI:
1432         case RTE_FLOW_FIELD_GENEVE_VNI:
1433                 return 24;
1434         case RTE_FLOW_FIELD_GTP_TEID:
1435         case RTE_FLOW_FIELD_TAG:
1436                 return 32;
1437         case RTE_FLOW_FIELD_MARK:
1438                 return __builtin_popcount(priv->sh->dv_mark_mask);
1439         case RTE_FLOW_FIELD_META:
1440                 return (flow_dv_get_metadata_reg(dev, attr, error) == REG_C_0) ?
1441                         __builtin_popcount(priv->sh->dv_meta_mask) : 32;
1442         case RTE_FLOW_FIELD_POINTER:
1443         case RTE_FLOW_FIELD_VALUE:
1444                 return inherit < 0 ? 0 : inherit;
1445         default:
1446                 MLX5_ASSERT(false);
1447         }
1448         return 0;
1449 }
1450
1451 static void
1452 mlx5_flow_field_id_to_modify_info
1453                 (const struct rte_flow_action_modify_data *data,
1454                  struct field_modify_info *info, uint32_t *mask,
1455                  uint32_t width, struct rte_eth_dev *dev,
1456                  const struct rte_flow_attr *attr, struct rte_flow_error *error)
1457 {
1458         struct mlx5_priv *priv = dev->data->dev_private;
1459         uint32_t idx = 0;
1460         uint32_t off = 0;
1461
1462         switch (data->field) {
1463         case RTE_FLOW_FIELD_START:
1464                 /* not supported yet */
1465                 MLX5_ASSERT(false);
1466                 break;
1467         case RTE_FLOW_FIELD_MAC_DST:
1468                 off = data->offset > 16 ? data->offset - 16 : 0;
1469                 if (mask) {
1470                         if (data->offset < 16) {
1471                                 info[idx] = (struct field_modify_info){2, 4,
1472                                                 MLX5_MODI_OUT_DMAC_15_0};
1473                                 if (width < 16) {
1474                                         mask[1] = rte_cpu_to_be_16(0xffff >>
1475                                                                  (16 - width));
1476                                         width = 0;
1477                                 } else {
1478                                         mask[1] = RTE_BE16(0xffff);
1479                                         width -= 16;
1480                                 }
1481                                 if (!width)
1482                                         break;
1483                                 ++idx;
1484                         }
1485                         info[idx] = (struct field_modify_info){4, 0,
1486                                                 MLX5_MODI_OUT_DMAC_47_16};
1487                         mask[0] = rte_cpu_to_be_32((0xffffffff >>
1488                                                     (32 - width)) << off);
1489                 } else {
1490                         if (data->offset < 16)
1491                                 info[idx++] = (struct field_modify_info){2, 0,
1492                                                 MLX5_MODI_OUT_DMAC_15_0};
1493                         info[idx] = (struct field_modify_info){4, off,
1494                                                 MLX5_MODI_OUT_DMAC_47_16};
1495                 }
1496                 break;
1497         case RTE_FLOW_FIELD_MAC_SRC:
1498                 off = data->offset > 16 ? data->offset - 16 : 0;
1499                 if (mask) {
1500                         if (data->offset < 16) {
1501                                 info[idx] = (struct field_modify_info){2, 4,
1502                                                 MLX5_MODI_OUT_SMAC_15_0};
1503                                 if (width < 16) {
1504                                         mask[1] = rte_cpu_to_be_16(0xffff >>
1505                                                                  (16 - width));
1506                                         width = 0;
1507                                 } else {
1508                                         mask[1] = RTE_BE16(0xffff);
1509                                         width -= 16;
1510                                 }
1511                                 if (!width)
1512                                         break;
1513                                 ++idx;
1514                         }
1515                         info[idx] = (struct field_modify_info){4, 0,
1516                                                 MLX5_MODI_OUT_SMAC_47_16};
1517                         mask[0] = rte_cpu_to_be_32((0xffffffff >>
1518                                                     (32 - width)) << off);
1519                 } else {
1520                         if (data->offset < 16)
1521                                 info[idx++] = (struct field_modify_info){2, 0,
1522                                                 MLX5_MODI_OUT_SMAC_15_0};
1523                         info[idx] = (struct field_modify_info){4, off,
1524                                                 MLX5_MODI_OUT_SMAC_47_16};
1525                 }
1526                 break;
1527         case RTE_FLOW_FIELD_VLAN_TYPE:
1528                 /* not supported yet */
1529                 break;
1530         case RTE_FLOW_FIELD_VLAN_ID:
1531                 info[idx] = (struct field_modify_info){2, 0,
1532                                         MLX5_MODI_OUT_FIRST_VID};
1533                 if (mask)
1534                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1535                 break;
1536         case RTE_FLOW_FIELD_MAC_TYPE:
1537                 info[idx] = (struct field_modify_info){2, 0,
1538                                         MLX5_MODI_OUT_ETHERTYPE};
1539                 if (mask)
1540                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1541                 break;
1542         case RTE_FLOW_FIELD_IPV4_DSCP:
1543                 info[idx] = (struct field_modify_info){1, 0,
1544                                         MLX5_MODI_OUT_IP_DSCP};
1545                 if (mask)
1546                         mask[idx] = 0x3f >> (6 - width);
1547                 break;
1548         case RTE_FLOW_FIELD_IPV4_TTL:
1549                 info[idx] = (struct field_modify_info){1, 0,
1550                                         MLX5_MODI_OUT_IPV4_TTL};
1551                 if (mask)
1552                         mask[idx] = 0xff >> (8 - width);
1553                 break;
1554         case RTE_FLOW_FIELD_IPV4_SRC:
1555                 info[idx] = (struct field_modify_info){4, 0,
1556                                         MLX5_MODI_OUT_SIPV4};
1557                 if (mask)
1558                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1559                                                      (32 - width));
1560                 break;
1561         case RTE_FLOW_FIELD_IPV4_DST:
1562                 info[idx] = (struct field_modify_info){4, 0,
1563                                         MLX5_MODI_OUT_DIPV4};
1564                 if (mask)
1565                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1566                                                      (32 - width));
1567                 break;
1568         case RTE_FLOW_FIELD_IPV6_DSCP:
1569                 info[idx] = (struct field_modify_info){1, 0,
1570                                         MLX5_MODI_OUT_IP_DSCP};
1571                 if (mask)
1572                         mask[idx] = 0x3f >> (6 - width);
1573                 break;
1574         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1575                 info[idx] = (struct field_modify_info){1, 0,
1576                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1577                 if (mask)
1578                         mask[idx] = 0xff >> (8 - width);
1579                 break;
1580         case RTE_FLOW_FIELD_IPV6_SRC:
1581                 if (mask) {
1582                         if (data->offset < 32) {
1583                                 info[idx] = (struct field_modify_info){4, 12,
1584                                                 MLX5_MODI_OUT_SIPV6_31_0};
1585                                 if (width < 32) {
1586                                         mask[3] =
1587                                                 rte_cpu_to_be_32(0xffffffff >>
1588                                                                  (32 - width));
1589                                         width = 0;
1590                                 } else {
1591                                         mask[3] = RTE_BE32(0xffffffff);
1592                                         width -= 32;
1593                                 }
1594                                 if (!width)
1595                                         break;
1596                                 ++idx;
1597                         }
1598                         if (data->offset < 64) {
1599                                 info[idx] = (struct field_modify_info){4, 8,
1600                                                 MLX5_MODI_OUT_SIPV6_63_32};
1601                                 if (width < 32) {
1602                                         mask[2] =
1603                                                 rte_cpu_to_be_32(0xffffffff >>
1604                                                                  (32 - width));
1605                                         width = 0;
1606                                 } else {
1607                                         mask[2] = RTE_BE32(0xffffffff);
1608                                         width -= 32;
1609                                 }
1610                                 if (!width)
1611                                         break;
1612                                 ++idx;
1613                         }
1614                         if (data->offset < 96) {
1615                                 info[idx] = (struct field_modify_info){4, 4,
1616                                                 MLX5_MODI_OUT_SIPV6_95_64};
1617                                 if (width < 32) {
1618                                         mask[1] =
1619                                                 rte_cpu_to_be_32(0xffffffff >>
1620                                                                  (32 - width));
1621                                         width = 0;
1622                                 } else {
1623                                         mask[1] = RTE_BE32(0xffffffff);
1624                                         width -= 32;
1625                                 }
1626                                 if (!width)
1627                                         break;
1628                                 ++idx;
1629                         }
1630                         info[idx] = (struct field_modify_info){4, 0,
1631                                                 MLX5_MODI_OUT_SIPV6_127_96};
1632                         mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
1633                 } else {
1634                         if (data->offset < 32)
1635                                 info[idx++] = (struct field_modify_info){4, 0,
1636                                                 MLX5_MODI_OUT_SIPV6_31_0};
1637                         if (data->offset < 64)
1638                                 info[idx++] = (struct field_modify_info){4, 0,
1639                                                 MLX5_MODI_OUT_SIPV6_63_32};
1640                         if (data->offset < 96)
1641                                 info[idx++] = (struct field_modify_info){4, 0,
1642                                                 MLX5_MODI_OUT_SIPV6_95_64};
1643                         if (data->offset < 128)
1644                                 info[idx++] = (struct field_modify_info){4, 0,
1645                                                 MLX5_MODI_OUT_SIPV6_127_96};
1646                 }
1647                 break;
1648         case RTE_FLOW_FIELD_IPV6_DST:
1649                 if (mask) {
1650                         if (data->offset < 32) {
1651                                 info[idx] = (struct field_modify_info){4, 12,
1652                                                 MLX5_MODI_OUT_DIPV6_31_0};
1653                                 if (width < 32) {
1654                                         mask[3] =
1655                                                 rte_cpu_to_be_32(0xffffffff >>
1656                                                                  (32 - width));
1657                                         width = 0;
1658                                 } else {
1659                                         mask[3] = RTE_BE32(0xffffffff);
1660                                         width -= 32;
1661                                 }
1662                                 if (!width)
1663                                         break;
1664                                 ++idx;
1665                         }
1666                         if (data->offset < 64) {
1667                                 info[idx] = (struct field_modify_info){4, 8,
1668                                                 MLX5_MODI_OUT_DIPV6_63_32};
1669                                 if (width < 32) {
1670                                         mask[2] =
1671                                                 rte_cpu_to_be_32(0xffffffff >>
1672                                                                  (32 - width));
1673                                         width = 0;
1674                                 } else {
1675                                         mask[2] = RTE_BE32(0xffffffff);
1676                                         width -= 32;
1677                                 }
1678                                 if (!width)
1679                                         break;
1680                                 ++idx;
1681                         }
1682                         if (data->offset < 96) {
1683                                 info[idx] = (struct field_modify_info){4, 4,
1684                                                 MLX5_MODI_OUT_DIPV6_95_64};
1685                                 if (width < 32) {
1686                                         mask[1] =
1687                                                 rte_cpu_to_be_32(0xffffffff >>
1688                                                                  (32 - width));
1689                                         width = 0;
1690                                 } else {
1691                                         mask[1] = RTE_BE32(0xffffffff);
1692                                         width -= 32;
1693                                 }
1694                                 if (!width)
1695                                         break;
1696                                 ++idx;
1697                         }
1698                         info[idx] = (struct field_modify_info){4, 0,
1699                                                 MLX5_MODI_OUT_DIPV6_127_96};
1700                         mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
1701                 } else {
1702                         if (data->offset < 32)
1703                                 info[idx++] = (struct field_modify_info){4, 0,
1704                                                 MLX5_MODI_OUT_DIPV6_31_0};
1705                         if (data->offset < 64)
1706                                 info[idx++] = (struct field_modify_info){4, 0,
1707                                                 MLX5_MODI_OUT_DIPV6_63_32};
1708                         if (data->offset < 96)
1709                                 info[idx++] = (struct field_modify_info){4, 0,
1710                                                 MLX5_MODI_OUT_DIPV6_95_64};
1711                         if (data->offset < 128)
1712                                 info[idx++] = (struct field_modify_info){4, 0,
1713                                                 MLX5_MODI_OUT_DIPV6_127_96};
1714                 }
1715                 break;
1716         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1717                 info[idx] = (struct field_modify_info){2, 0,
1718                                         MLX5_MODI_OUT_TCP_SPORT};
1719                 if (mask)
1720                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1721                 break;
1722         case RTE_FLOW_FIELD_TCP_PORT_DST:
1723                 info[idx] = (struct field_modify_info){2, 0,
1724                                         MLX5_MODI_OUT_TCP_DPORT};
1725                 if (mask)
1726                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1727                 break;
1728         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1729                 info[idx] = (struct field_modify_info){4, 0,
1730                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1731                 if (mask)
1732                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1733                                                      (32 - width));
1734                 break;
1735         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1736                 info[idx] = (struct field_modify_info){4, 0,
1737                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1738                 if (mask)
1739                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1740                                                      (32 - width));
1741                 break;
1742         case RTE_FLOW_FIELD_TCP_FLAGS:
1743                 info[idx] = (struct field_modify_info){2, 0,
1744                                         MLX5_MODI_OUT_TCP_FLAGS};
1745                 if (mask)
1746                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1747                 break;
1748         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1749                 info[idx] = (struct field_modify_info){2, 0,
1750                                         MLX5_MODI_OUT_UDP_SPORT};
1751                 if (mask)
1752                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1753                 break;
1754         case RTE_FLOW_FIELD_UDP_PORT_DST:
1755                 info[idx] = (struct field_modify_info){2, 0,
1756                                         MLX5_MODI_OUT_UDP_DPORT};
1757                 if (mask)
1758                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1759                 break;
1760         case RTE_FLOW_FIELD_VXLAN_VNI:
1761                 /* not supported yet */
1762                 break;
1763         case RTE_FLOW_FIELD_GENEVE_VNI:
1764                 /* not supported yet*/
1765                 break;
1766         case RTE_FLOW_FIELD_GTP_TEID:
1767                 info[idx] = (struct field_modify_info){4, 0,
1768                                         MLX5_MODI_GTP_TEID};
1769                 if (mask)
1770                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1771                                                      (32 - width));
1772                 break;
1773         case RTE_FLOW_FIELD_TAG:
1774                 {
1775                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1776                                                    data->level, error);
1777                         if (reg < 0)
1778                                 return;
1779                         MLX5_ASSERT(reg != REG_NON);
1780                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1781                         info[idx] = (struct field_modify_info){4, 0,
1782                                                 reg_to_field[reg]};
1783                         if (mask)
1784                                 mask[idx] =
1785                                         rte_cpu_to_be_32(0xffffffff >>
1786                                                          (32 - width));
1787                 }
1788                 break;
1789         case RTE_FLOW_FIELD_MARK:
1790                 {
1791                         uint32_t mark_mask = priv->sh->dv_mark_mask;
1792                         uint32_t mark_count = __builtin_popcount(mark_mask);
1793                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1794                                                        0, error);
1795                         if (reg < 0)
1796                                 return;
1797                         MLX5_ASSERT(reg != REG_NON);
1798                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1799                         info[idx] = (struct field_modify_info){4, 0,
1800                                                 reg_to_field[reg]};
1801                         if (mask)
1802                                 mask[idx] = rte_cpu_to_be_32((mark_mask >>
1803                                          (mark_count - width)) & mark_mask);
1804                 }
1805                 break;
1806         case RTE_FLOW_FIELD_META:
1807                 {
1808                         uint32_t meta_mask = priv->sh->dv_meta_mask;
1809                         uint32_t meta_count = __builtin_popcount(meta_mask);
1810                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1811                         if (reg < 0)
1812                                 return;
1813                         MLX5_ASSERT(reg != REG_NON);
1814                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1815                         info[idx] = (struct field_modify_info){4, 0,
1816                                                 reg_to_field[reg]};
1817                         if (mask)
1818                                 mask[idx] = rte_cpu_to_be_32((meta_mask >>
1819                                         (meta_count - width)) & meta_mask);
1820                 }
1821                 break;
1822         case RTE_FLOW_FIELD_POINTER:
1823         case RTE_FLOW_FIELD_VALUE:
1824         default:
1825                 MLX5_ASSERT(false);
1826                 break;
1827         }
1828 }
1829
1830 /**
1831  * Convert modify_field action to DV specification.
1832  *
1833  * @param[in] dev
1834  *   Pointer to the rte_eth_dev structure.
1835  * @param[in,out] resource
1836  *   Pointer to the modify-header resource.
1837  * @param[in] action
1838  *   Pointer to action specification.
1839  * @param[in] attr
1840  *   Attributes of flow that includes this item.
1841  * @param[out] error
1842  *   Pointer to the error structure.
1843  *
1844  * @return
1845  *   0 on success, a negative errno value otherwise and rte_errno is set.
1846  */
1847 static int
1848 flow_dv_convert_action_modify_field
1849                         (struct rte_eth_dev *dev,
1850                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1851                          const struct rte_flow_action *action,
1852                          const struct rte_flow_attr *attr,
1853                          struct rte_flow_error *error)
1854 {
1855         const struct rte_flow_action_modify_field *conf =
1856                 (const struct rte_flow_action_modify_field *)(action->conf);
1857         struct rte_flow_item item = {
1858                 .spec = NULL,
1859                 .mask = NULL
1860         };
1861         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1862                                                                 {0, 0, 0} };
1863         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1864                                                                 {0, 0, 0} };
1865         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1866         uint32_t type, meta = 0;
1867
1868         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1869             conf->src.field == RTE_FLOW_FIELD_VALUE) {
1870                 type = MLX5_MODIFICATION_TYPE_SET;
1871                 /** For SET fill the destination field (field) first. */
1872                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1873                                                   conf->width, dev,
1874                                                   attr, error);
1875                 item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
1876                                         (void *)(uintptr_t)conf->src.pvalue :
1877                                         (void *)(uintptr_t)&conf->src.value;
1878                 if (conf->dst.field == RTE_FLOW_FIELD_META) {
1879                         meta = *(const unaligned_uint32_t *)item.spec;
1880                         meta = rte_cpu_to_be_32(meta);
1881                         item.spec = &meta;
1882                 }
1883         } else {
1884                 type = MLX5_MODIFICATION_TYPE_COPY;
1885                 /** For COPY fill the destination field (dcopy) without mask. */
1886                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1887                                                   conf->width, dev,
1888                                                   attr, error);
1889                 /** Then construct the source field (field) with mask. */
1890                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1891                                                   conf->width, dev,
1892                                                   attr, error);
1893         }
1894         item.mask = &mask;
1895         return flow_dv_convert_modify_action(&item,
1896                         field, dcopy, resource, type, error);
1897 }
1898
1899 /**
1900  * Validate MARK item.
1901  *
1902  * @param[in] dev
1903  *   Pointer to the rte_eth_dev structure.
1904  * @param[in] item
1905  *   Item specification.
1906  * @param[in] attr
1907  *   Attributes of flow that includes this item.
1908  * @param[out] error
1909  *   Pointer to error structure.
1910  *
1911  * @return
1912  *   0 on success, a negative errno value otherwise and rte_errno is set.
1913  */
1914 static int
1915 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1916                            const struct rte_flow_item *item,
1917                            const struct rte_flow_attr *attr __rte_unused,
1918                            struct rte_flow_error *error)
1919 {
1920         struct mlx5_priv *priv = dev->data->dev_private;
1921         struct mlx5_sh_config *config = &priv->sh->config;
1922         const struct rte_flow_item_mark *spec = item->spec;
1923         const struct rte_flow_item_mark *mask = item->mask;
1924         const struct rte_flow_item_mark nic_mask = {
1925                 .id = priv->sh->dv_mark_mask,
1926         };
1927         int ret;
1928
1929         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1930                 return rte_flow_error_set(error, ENOTSUP,
1931                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1932                                           "extended metadata feature"
1933                                           " isn't enabled");
1934         if (!mlx5_flow_ext_mreg_supported(dev))
1935                 return rte_flow_error_set(error, ENOTSUP,
1936                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1937                                           "extended metadata register"
1938                                           " isn't supported");
1939         if (!nic_mask.id)
1940                 return rte_flow_error_set(error, ENOTSUP,
1941                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1942                                           "extended metadata register"
1943                                           " isn't available");
1944         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1945         if (ret < 0)
1946                 return ret;
1947         if (!spec)
1948                 return rte_flow_error_set(error, EINVAL,
1949                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1950                                           item->spec,
1951                                           "data cannot be empty");
1952         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1953                 return rte_flow_error_set(error, EINVAL,
1954                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1955                                           &spec->id,
1956                                           "mark id exceeds the limit");
1957         if (!mask)
1958                 mask = &nic_mask;
1959         if (!mask->id)
1960                 return rte_flow_error_set(error, EINVAL,
1961                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1962                                         "mask cannot be zero");
1963
1964         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1965                                         (const uint8_t *)&nic_mask,
1966                                         sizeof(struct rte_flow_item_mark),
1967                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1968         if (ret < 0)
1969                 return ret;
1970         return 0;
1971 }
1972
1973 /**
1974  * Validate META item.
1975  *
1976  * @param[in] dev
1977  *   Pointer to the rte_eth_dev structure.
1978  * @param[in] item
1979  *   Item specification.
1980  * @param[in] attr
1981  *   Attributes of flow that includes this item.
1982  * @param[out] error
1983  *   Pointer to error structure.
1984  *
1985  * @return
1986  *   0 on success, a negative errno value otherwise and rte_errno is set.
1987  */
1988 static int
1989 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
1990                            const struct rte_flow_item *item,
1991                            const struct rte_flow_attr *attr,
1992                            struct rte_flow_error *error)
1993 {
1994         struct mlx5_priv *priv = dev->data->dev_private;
1995         struct mlx5_sh_config *config = &priv->sh->config;
1996         const struct rte_flow_item_meta *spec = item->spec;
1997         const struct rte_flow_item_meta *mask = item->mask;
1998         struct rte_flow_item_meta nic_mask = {
1999                 .data = UINT32_MAX
2000         };
2001         int reg;
2002         int ret;
2003
2004         if (!spec)
2005                 return rte_flow_error_set(error, EINVAL,
2006                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2007                                           item->spec,
2008                                           "data cannot be empty");
2009         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2010                 if (!mlx5_flow_ext_mreg_supported(dev))
2011                         return rte_flow_error_set(error, ENOTSUP,
2012                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2013                                           "extended metadata register"
2014                                           " isn't supported");
2015                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2016                 if (reg < 0)
2017                         return reg;
2018                 if (reg == REG_NON)
2019                         return rte_flow_error_set(error, ENOTSUP,
2020                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2021                                         "unavailable extended metadata register");
2022                 if (reg == REG_B)
2023                         return rte_flow_error_set(error, ENOTSUP,
2024                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2025                                           "match on reg_b "
2026                                           "isn't supported");
2027                 if (reg != REG_A)
2028                         nic_mask.data = priv->sh->dv_meta_mask;
2029         } else {
2030                 if (attr->transfer)
2031                         return rte_flow_error_set(error, ENOTSUP,
2032                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2033                                         "extended metadata feature "
2034                                         "should be enabled when "
2035                                         "meta item is requested "
2036                                         "with e-switch mode ");
2037                 if (attr->ingress)
2038                         return rte_flow_error_set(error, ENOTSUP,
2039                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2040                                         "match on metadata for ingress "
2041                                         "is not supported in legacy "
2042                                         "metadata mode");
2043         }
2044         if (!mask)
2045                 mask = &rte_flow_item_meta_mask;
2046         if (!mask->data)
2047                 return rte_flow_error_set(error, EINVAL,
2048                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2049                                         "mask cannot be zero");
2050
2051         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2052                                         (const uint8_t *)&nic_mask,
2053                                         sizeof(struct rte_flow_item_meta),
2054                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2055         return ret;
2056 }
2057
2058 /**
2059  * Validate TAG item.
2060  *
2061  * @param[in] dev
2062  *   Pointer to the rte_eth_dev structure.
2063  * @param[in] item
2064  *   Item specification.
2065  * @param[in] attr
2066  *   Attributes of flow that includes this item.
2067  * @param[out] error
2068  *   Pointer to error structure.
2069  *
2070  * @return
2071  *   0 on success, a negative errno value otherwise and rte_errno is set.
2072  */
2073 static int
2074 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2075                           const struct rte_flow_item *item,
2076                           const struct rte_flow_attr *attr __rte_unused,
2077                           struct rte_flow_error *error)
2078 {
2079         const struct rte_flow_item_tag *spec = item->spec;
2080         const struct rte_flow_item_tag *mask = item->mask;
2081         const struct rte_flow_item_tag nic_mask = {
2082                 .data = RTE_BE32(UINT32_MAX),
2083                 .index = 0xff,
2084         };
2085         int ret;
2086
2087         if (!mlx5_flow_ext_mreg_supported(dev))
2088                 return rte_flow_error_set(error, ENOTSUP,
2089                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2090                                           "extensive metadata register"
2091                                           " isn't supported");
2092         if (!spec)
2093                 return rte_flow_error_set(error, EINVAL,
2094                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2095                                           item->spec,
2096                                           "data cannot be empty");
2097         if (!mask)
2098                 mask = &rte_flow_item_tag_mask;
2099         if (!mask->data)
2100                 return rte_flow_error_set(error, EINVAL,
2101                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2102                                         "mask cannot be zero");
2103
2104         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2105                                         (const uint8_t *)&nic_mask,
2106                                         sizeof(struct rte_flow_item_tag),
2107                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2108         if (ret < 0)
2109                 return ret;
2110         if (mask->index != 0xff)
2111                 return rte_flow_error_set(error, EINVAL,
2112                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2113                                           "partial mask for tag index"
2114                                           " is not supported");
2115         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2116         if (ret < 0)
2117                 return ret;
2118         MLX5_ASSERT(ret != REG_NON);
2119         return 0;
2120 }
2121
2122 /**
2123  * Validate vport item.
2124  *
2125  * @param[in] dev
2126  *   Pointer to the rte_eth_dev structure.
2127  * @param[in] item
2128  *   Item specification.
2129  * @param[in] attr
2130  *   Attributes of flow that includes this item.
2131  * @param[in] item_flags
2132  *   Bit-fields that holds the items detected until now.
2133  * @param[out] error
2134  *   Pointer to error structure.
2135  *
2136  * @return
2137  *   0 on success, a negative errno value otherwise and rte_errno is set.
2138  */
2139 static int
2140 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2141                               const struct rte_flow_item *item,
2142                               const struct rte_flow_attr *attr,
2143                               uint64_t item_flags,
2144                               struct rte_flow_error *error)
2145 {
2146         const struct rte_flow_item_port_id *spec = item->spec;
2147         const struct rte_flow_item_port_id *mask = item->mask;
2148         const struct rte_flow_item_port_id switch_mask = {
2149                         .id = 0xffffffff,
2150         };
2151         struct mlx5_priv *esw_priv;
2152         struct mlx5_priv *dev_priv;
2153         int ret;
2154
2155         if (!attr->transfer)
2156                 return rte_flow_error_set(error, EINVAL,
2157                                           RTE_FLOW_ERROR_TYPE_ITEM,
2158                                           NULL,
2159                                           "match on port id is valid only"
2160                                           " when transfer flag is enabled");
2161         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2162                 return rte_flow_error_set(error, ENOTSUP,
2163                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2164                                           "multiple source ports are not"
2165                                           " supported");
2166         if (!mask)
2167                 mask = &switch_mask;
2168         if (mask->id != 0xffffffff)
2169                 return rte_flow_error_set(error, ENOTSUP,
2170                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2171                                            mask,
2172                                            "no support for partial mask on"
2173                                            " \"id\" field");
2174         ret = mlx5_flow_item_acceptable
2175                                 (item, (const uint8_t *)mask,
2176                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2177                                  sizeof(struct rte_flow_item_port_id),
2178                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2179         if (ret)
2180                 return ret;
2181         if (!spec)
2182                 return 0;
2183         if (spec->id == MLX5_PORT_ESW_MGR)
2184                 return 0;
2185         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2186         if (!esw_priv)
2187                 return rte_flow_error_set(error, rte_errno,
2188                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2189                                           "failed to obtain E-Switch info for"
2190                                           " port");
2191         dev_priv = mlx5_dev_to_eswitch_info(dev);
2192         if (!dev_priv)
2193                 return rte_flow_error_set(error, rte_errno,
2194                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2195                                           NULL,
2196                                           "failed to obtain E-Switch info");
2197         if (esw_priv->domain_id != dev_priv->domain_id)
2198                 return rte_flow_error_set(error, EINVAL,
2199                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2200                                           "cannot match on a port from a"
2201                                           " different E-Switch");
2202         return 0;
2203 }
2204
2205 /**
2206  * Validate VLAN item.
2207  *
2208  * @param[in] item
2209  *   Item specification.
2210  * @param[in] item_flags
2211  *   Bit-fields that holds the items detected until now.
2212  * @param[in] dev
2213  *   Ethernet device flow is being created on.
2214  * @param[out] error
2215  *   Pointer to error structure.
2216  *
2217  * @return
2218  *   0 on success, a negative errno value otherwise and rte_errno is set.
2219  */
2220 static int
2221 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2222                            uint64_t item_flags,
2223                            struct rte_eth_dev *dev,
2224                            struct rte_flow_error *error)
2225 {
2226         const struct rte_flow_item_vlan *mask = item->mask;
2227         const struct rte_flow_item_vlan nic_mask = {
2228                 .tci = RTE_BE16(UINT16_MAX),
2229                 .inner_type = RTE_BE16(UINT16_MAX),
2230                 .has_more_vlan = 1,
2231         };
2232         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2233         int ret;
2234         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2235                                         MLX5_FLOW_LAYER_INNER_L4) :
2236                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2237                                         MLX5_FLOW_LAYER_OUTER_L4);
2238         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2239                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2240
2241         if (item_flags & vlanm)
2242                 return rte_flow_error_set(error, EINVAL,
2243                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2244                                           "multiple VLAN layers not supported");
2245         else if ((item_flags & l34m) != 0)
2246                 return rte_flow_error_set(error, EINVAL,
2247                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2248                                           "VLAN cannot follow L3/L4 layer");
2249         if (!mask)
2250                 mask = &rte_flow_item_vlan_mask;
2251         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2252                                         (const uint8_t *)&nic_mask,
2253                                         sizeof(struct rte_flow_item_vlan),
2254                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2255         if (ret)
2256                 return ret;
2257         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2258                 struct mlx5_priv *priv = dev->data->dev_private;
2259
2260                 if (priv->vmwa_context) {
2261                         /*
2262                          * Non-NULL context means we have a virtual machine
2263                          * and SR-IOV enabled, we have to create VLAN interface
2264                          * to make hypervisor to setup E-Switch vport
2265                          * context correctly. We avoid creating the multiple
2266                          * VLAN interfaces, so we cannot support VLAN tag mask.
2267                          */
2268                         return rte_flow_error_set(error, EINVAL,
2269                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2270                                                   item,
2271                                                   "VLAN tag mask is not"
2272                                                   " supported in virtual"
2273                                                   " environment");
2274                 }
2275         }
2276         return 0;
2277 }
2278
2279 /*
2280  * GTP flags are contained in 1 byte of the format:
2281  * -------------------------------------------
2282  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2283  * |-----------------------------------------|
2284  * | value | Version | PT | Res | E | S | PN |
2285  * -------------------------------------------
2286  *
2287  * Matching is supported only for GTP flags E, S, PN.
2288  */
2289 #define MLX5_GTP_FLAGS_MASK     0x07
2290
2291 /**
2292  * Validate GTP item.
2293  *
2294  * @param[in] dev
2295  *   Pointer to the rte_eth_dev structure.
2296  * @param[in] item
2297  *   Item specification.
2298  * @param[in] item_flags
2299  *   Bit-fields that holds the items detected until now.
2300  * @param[out] error
2301  *   Pointer to error structure.
2302  *
2303  * @return
2304  *   0 on success, a negative errno value otherwise and rte_errno is set.
2305  */
2306 static int
2307 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2308                           const struct rte_flow_item *item,
2309                           uint64_t item_flags,
2310                           struct rte_flow_error *error)
2311 {
2312         struct mlx5_priv *priv = dev->data->dev_private;
2313         const struct rte_flow_item_gtp *spec = item->spec;
2314         const struct rte_flow_item_gtp *mask = item->mask;
2315         const struct rte_flow_item_gtp nic_mask = {
2316                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2317                 .msg_type = 0xff,
2318                 .teid = RTE_BE32(0xffffffff),
2319         };
2320
2321         if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_gtp)
2322                 return rte_flow_error_set(error, ENOTSUP,
2323                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2324                                           "GTP support is not enabled");
2325         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2326                 return rte_flow_error_set(error, ENOTSUP,
2327                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2328                                           "multiple tunnel layers not"
2329                                           " supported");
2330         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2331                 return rte_flow_error_set(error, EINVAL,
2332                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2333                                           "no outer UDP layer found");
2334         if (!mask)
2335                 mask = &rte_flow_item_gtp_mask;
2336         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2337                 return rte_flow_error_set(error, ENOTSUP,
2338                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2339                                           "Match is supported for GTP"
2340                                           " flags only");
2341         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2342                                          (const uint8_t *)&nic_mask,
2343                                          sizeof(struct rte_flow_item_gtp),
2344                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2345 }
2346
2347 /**
2348  * Validate GTP PSC item.
2349  *
2350  * @param[in] item
2351  *   Item specification.
2352  * @param[in] last_item
2353  *   Previous validated item in the pattern items.
2354  * @param[in] gtp_item
2355  *   Previous GTP item specification.
2356  * @param[in] attr
2357  *   Pointer to flow attributes.
2358  * @param[out] error
2359  *   Pointer to error structure.
2360  *
2361  * @return
2362  *   0 on success, a negative errno value otherwise and rte_errno is set.
2363  */
2364 static int
2365 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2366                               uint64_t last_item,
2367                               const struct rte_flow_item *gtp_item,
2368                               const struct rte_flow_attr *attr,
2369                               struct rte_flow_error *error)
2370 {
2371         const struct rte_flow_item_gtp *gtp_spec;
2372         const struct rte_flow_item_gtp *gtp_mask;
2373         const struct rte_flow_item_gtp_psc *mask;
2374         const struct rte_flow_item_gtp_psc nic_mask = {
2375                 .hdr.type = 0xF,
2376                 .hdr.qfi = 0x3F,
2377         };
2378
2379         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2380                 return rte_flow_error_set
2381                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2382                          "GTP PSC item must be preceded with GTP item");
2383         gtp_spec = gtp_item->spec;
2384         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2385         /* GTP spec and E flag is requested to match zero. */
2386         if (gtp_spec &&
2387                 (gtp_mask->v_pt_rsv_flags &
2388                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2389                 return rte_flow_error_set
2390                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2391                          "GTP E flag must be 1 to match GTP PSC");
2392         /* Check the flow is not created in group zero. */
2393         if (!attr->transfer && !attr->group)
2394                 return rte_flow_error_set
2395                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2396                          "GTP PSC is not supported for group 0");
2397         /* GTP spec is here and E flag is requested to match zero. */
2398         if (!item->spec)
2399                 return 0;
2400         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2401         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2402                                          (const uint8_t *)&nic_mask,
2403                                          sizeof(struct rte_flow_item_gtp_psc),
2404                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2405 }
2406
2407 /**
2408  * Validate IPV4 item.
2409  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2410  * add specific validation of fragment_offset field,
2411  *
2412  * @param[in] item
2413  *   Item specification.
2414  * @param[in] item_flags
2415  *   Bit-fields that holds the items detected until now.
2416  * @param[out] error
2417  *   Pointer to error structure.
2418  *
2419  * @return
2420  *   0 on success, a negative errno value otherwise and rte_errno is set.
2421  */
2422 static int
2423 flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
2424                            const struct rte_flow_item *item,
2425                            uint64_t item_flags, uint64_t last_item,
2426                            uint16_t ether_type, struct rte_flow_error *error)
2427 {
2428         int ret;
2429         struct mlx5_priv *priv = dev->data->dev_private;
2430         struct mlx5_hca_attr *attr = &priv->sh->cdev->config.hca_attr;
2431         const struct rte_flow_item_ipv4 *spec = item->spec;
2432         const struct rte_flow_item_ipv4 *last = item->last;
2433         const struct rte_flow_item_ipv4 *mask = item->mask;
2434         rte_be16_t fragment_offset_spec = 0;
2435         rte_be16_t fragment_offset_last = 0;
2436         struct rte_flow_item_ipv4 nic_ipv4_mask = {
2437                 .hdr = {
2438                         .src_addr = RTE_BE32(0xffffffff),
2439                         .dst_addr = RTE_BE32(0xffffffff),
2440                         .type_of_service = 0xff,
2441                         .fragment_offset = RTE_BE16(0xffff),
2442                         .next_proto_id = 0xff,
2443                         .time_to_live = 0xff,
2444                 },
2445         };
2446
2447         if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
2448                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2449                 bool ihl_cap = !tunnel ?
2450                                attr->outer_ipv4_ihl : attr->inner_ipv4_ihl;
2451                 if (!ihl_cap)
2452                         return rte_flow_error_set(error, ENOTSUP,
2453                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2454                                                   item,
2455                                                   "IPV4 ihl offload not supported");
2456                 nic_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
2457         }
2458         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2459                                            ether_type, &nic_ipv4_mask,
2460                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2461         if (ret < 0)
2462                 return ret;
2463         if (spec && mask)
2464                 fragment_offset_spec = spec->hdr.fragment_offset &
2465                                        mask->hdr.fragment_offset;
2466         if (!fragment_offset_spec)
2467                 return 0;
2468         /*
2469          * spec and mask are valid, enforce using full mask to make sure the
2470          * complete value is used correctly.
2471          */
2472         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2473                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2474                 return rte_flow_error_set(error, EINVAL,
2475                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2476                                           item, "must use full mask for"
2477                                           " fragment_offset");
2478         /*
2479          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2480          * indicating this is 1st fragment of fragmented packet.
2481          * This is not yet supported in MLX5, return appropriate error message.
2482          */
2483         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2484                 return rte_flow_error_set(error, ENOTSUP,
2485                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2486                                           "match on first fragment not "
2487                                           "supported");
2488         if (fragment_offset_spec && !last)
2489                 return rte_flow_error_set(error, ENOTSUP,
2490                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2491                                           "specified value not supported");
2492         /* spec and last are valid, validate the specified range. */
2493         fragment_offset_last = last->hdr.fragment_offset &
2494                                mask->hdr.fragment_offset;
2495         /*
2496          * Match on fragment_offset spec 0x2001 and last 0x3fff
2497          * means MF is 1 and frag-offset is > 0.
2498          * This matches the second and subsequent fragments, excluding
2499          * the last one. This is not yet supported in MLX5; return an
2500          * appropriate error message.
2501          */
2502         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2503             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2504                 return rte_flow_error_set(error, ENOTSUP,
2505                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2506                                           last, "match on following "
2507                                           "fragments not supported");
2508         /*
2509          * Match on fragment_offset spec 0x0001 and last 0x1fff
2510          * means MF is 0 and frag-offset is > 0.
2511          * This matches the last fragment of a fragmented packet.
2512          * This is not yet supported in MLX5; return an appropriate
2513          * error message.
2514          */
2515         if (fragment_offset_spec == RTE_BE16(1) &&
2516             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2517                 return rte_flow_error_set(error, ENOTSUP,
2518                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2519                                           last, "match on last "
2520                                           "fragment not supported");
2521         /*
2522          * Match on fragment_offset spec 0x0001 and last 0x3fff
2523          * means MF and/or frag-offset is not 0,
2524          * i.e. this matches any fragmented packet.
2525          * Other range values are invalid and rejected.
2526          */
2527         if (!(fragment_offset_spec == RTE_BE16(1) &&
2528               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2529                 return rte_flow_error_set(error, ENOTSUP,
2530                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2531                                           "specified range not supported");
2532         return 0;
2533 }
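
/*
 * A minimal application-side sketch (illustrative only; the names below are
 * not part of the driver): the only fragment_offset range accepted by the
 * checks above is spec 0x0001 with last 0x3fff under a full 0x3fff mask,
 * which matches any fragmented packet (MF and/or fragment offset non-zero).
 */
static const struct rte_flow_item_ipv4 ipv4_frag_spec = {
	.hdr = { .fragment_offset = RTE_BE16(1) },
};
static const struct rte_flow_item_ipv4 ipv4_frag_last = {
	.hdr = { .fragment_offset =
		 RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK | RTE_IPV4_HDR_MF_FLAG) },
};
static const struct rte_flow_item_ipv4 ipv4_frag_mask = {
	.hdr = { .fragment_offset =
		 RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK | RTE_IPV4_HDR_MF_FLAG) },
};
/* Pattern item matching any fragmented IPv4 packet. */
static const struct rte_flow_item ipv4_frag_item = {
	.type = RTE_FLOW_ITEM_TYPE_IPV4,
	.spec = &ipv4_frag_spec,
	.last = &ipv4_frag_last,
	.mask = &ipv4_frag_mask,
};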
2534
2535 /**
2536  * Validate IPV6 fragment extension item.
2537  *
2538  * @param[in] item
2539  *   Item specification.
2540  * @param[in] item_flags
2541  *   Bit-fields that hold the items detected until now.
2542  * @param[out] error
2543  *   Pointer to error structure.
2544  *
2545  * @return
2546  *   0 on success, a negative errno value otherwise and rte_errno is set.
2547  */
2548 static int
2549 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2550                                     uint64_t item_flags,
2551                                     struct rte_flow_error *error)
2552 {
2553         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2554         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2555         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2556         rte_be16_t frag_data_spec = 0;
2557         rte_be16_t frag_data_last = 0;
2558         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2559         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2560                                       MLX5_FLOW_LAYER_OUTER_L4;
2561         int ret = 0;
2562         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2563                 .hdr = {
2564                         .next_header = 0xff,
2565                         .frag_data = RTE_BE16(0xffff),
2566                 },
2567         };
2568
2569         if (item_flags & l4m)
2570                 return rte_flow_error_set(error, EINVAL,
2571                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2572                                           "ipv6 fragment extension item cannot "
2573                                           "follow L4 item.");
2574         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2575             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2576                 return rte_flow_error_set(error, EINVAL,
2577                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2578                                           "ipv6 fragment extension item must "
2579                                           "follow ipv6 item");
2580         if (spec && mask)
2581                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2582         if (!frag_data_spec)
2583                 return 0;
2584         /*
2585          * spec and mask are valid, enforce using full mask to make sure the
2586          * complete value is used correctly.
2587          */
2588         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2589                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2590                 return rte_flow_error_set(error, EINVAL,
2591                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2592                                           item, "must use full mask for"
2593                                           " frag_data");
2594         /*
2595          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0,
2596          * i.e. this is the first fragment of a fragmented packet.
2597          */
2598         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2599                 return rte_flow_error_set(error, ENOTSUP,
2600                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2601                                           "match on first fragment not "
2602                                           "supported");
2603         if (frag_data_spec && !last)
2604                 return rte_flow_error_set(error, EINVAL,
2605                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2606                                           "specified value not supported");
2607         ret = mlx5_flow_item_acceptable
2608                                 (item, (const uint8_t *)mask,
2609                                  (const uint8_t *)&nic_mask,
2610                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2611                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2612         if (ret)
2613                 return ret;
2614         /* spec and last are valid, validate the specified range. */
2615         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2616         /*
2617          * Match on frag_data spec 0x0009 and last 0xfff9
2618          * means M is 1 and frag-offset is > 0.
2619          * This matches the second and subsequent fragments, excluding
2620          * the last one. This is not yet supported in MLX5; return an
2621          * appropriate error message.
2622          */
2623         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2624                                        RTE_IPV6_EHDR_MF_MASK) &&
2625             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2626                 return rte_flow_error_set(error, ENOTSUP,
2627                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2628                                           last, "match on following "
2629                                           "fragments not supported");
2630         /*
2631          * Match on frag_data spec 0x0008 and last 0xfff8
2632          * means M is 0 and frag-offset is > 0.
2633          * This matches the last fragment of a fragmented packet.
2634          * This is not yet supported in MLX5; return an appropriate
2635          * error message.
2636          */
2637         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2638             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2639                 return rte_flow_error_set(error, ENOTSUP,
2640                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2641                                           last, "match on last "
2642                                           "fragment not supported");
2643         /* Other range values are invalid and rejected. */
2644         return rte_flow_error_set(error, EINVAL,
2645                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2646                                   "specified range not supported");
2647 }
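
/*
 * A minimal sketch (illustrative, not part of the driver): the checks above
 * accept the IPv6 fragment extension item only when frag_data is not
 * matched on, e.g. a presence-only item after an IPv6 item; every nonzero
 * frag_data spec/last combination is eventually rejected.
 */
static const struct rte_flow_item ipv6_frag_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
	/* Presence-only match: NULL spec/last/mask. */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};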
2648
2649 /**
2650  * Validate ASO CT item.
2651  *
2652  * @param[in] dev
2653  *   Pointer to the rte_eth_dev structure.
2654  * @param[in] item
2655  *   Item specification.
2656  * @param[in] item_flags
2657  *   Pointer to bit-fields that hold the items detected until now.
2658  * @param[out] error
2659  *   Pointer to error structure.
2660  *
2661  * @return
2662  *   0 on success, a negative errno value otherwise and rte_errno is set.
2663  */
2664 static int
2665 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2666                              const struct rte_flow_item *item,
2667                              uint64_t *item_flags,
2668                              struct rte_flow_error *error)
2669 {
2670         const struct rte_flow_item_conntrack *spec = item->spec;
2671         const struct rte_flow_item_conntrack *mask = item->mask;
2672         RTE_SET_USED(dev);
2673         uint32_t flags;
2674
2675         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2676                 return rte_flow_error_set(error, EINVAL,
2677                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2678                                           "Only one CT is supported");
2679         if (!mask)
2680                 mask = &rte_flow_item_conntrack_mask;
             /* The conntrack state cannot be matched without a spec. */
             if (!spec)
                     return rte_flow_error_set(error, EINVAL,
                                               RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                               "CT item spec cannot be NULL");
2681         flags = spec->flags & mask->flags;
2682         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2683             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2684              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2685              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2686                 return rte_flow_error_set(error, EINVAL,
2687                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2688                                           "Conflicting status bits");
2689         /* State change also needs to be considered. */
2690         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2691         return 0;
2692 }
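
/*
 * A minimal sketch (illustrative, not part of the driver): a conntrack item
 * that passes the checks above -- a single CT item per rule and no
 * conflicting state bits (VALID alone, not combined with
 * INVALID/BAD/DISABLED).
 */
static const struct rte_flow_item_conntrack ct_state_spec = {
	.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
};
static const struct rte_flow_item ct_state_item = {
	.type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
	.spec = &ct_state_spec,
	.mask = &rte_flow_item_conntrack_mask,
};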
2693
2694 /**
2695  * Validate the pop VLAN action.
2696  *
2697  * @param[in] dev
2698  *   Pointer to the rte_eth_dev structure.
2699  * @param[in] action_flags
2700  *   Holds the actions detected until now.
2701  * @param[in] action
2702  *   Pointer to the pop vlan action.
2703  * @param[in] item_flags
2704  *   The items found in this flow rule.
2705  * @param[in] attr
2706  *   Pointer to flow attributes.
2707  * @param[out] error
2708  *   Pointer to error structure.
2709  *
2710  * @return
2711  *   0 on success, a negative errno value otherwise and rte_errno is set.
2712  */
2713 static int
2714 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2715                                  uint64_t action_flags,
2716                                  const struct rte_flow_action *action,
2717                                  uint64_t item_flags,
2718                                  const struct rte_flow_attr *attr,
2719                                  struct rte_flow_error *error)
2720 {
2721         const struct mlx5_priv *priv = dev->data->dev_private;
2722         struct mlx5_dev_ctx_shared *sh = priv->sh;
2723         bool direction_error = false;
2724
2725         if (!priv->sh->pop_vlan_action)
2726                 return rte_flow_error_set(error, ENOTSUP,
2727                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2728                                           NULL,
2729                                           "pop vlan action is not supported");
2730         /* Pop VLAN is unsupported in egress, except in CX6+ FDB mode. */
2731         if (attr->transfer) {
2732                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2733                 bool is_cx5 = sh->steering_format_version ==
2734                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2735
2736                 if (fdb_tx && is_cx5)
2737                         direction_error = true;
2738         } else if (attr->egress) {
2739                 direction_error = true;
2740         }
2741         if (direction_error)
2742                 return rte_flow_error_set(error, ENOTSUP,
2743                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2744                                           NULL,
2745                                           "pop vlan action not supported for egress");
2746         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2747                 return rte_flow_error_set(error, ENOTSUP,
2748                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2749                                           "no support for multiple VLAN "
2750                                           "actions");
2751         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2752         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2753             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2754                 return rte_flow_error_set(error, ENOTSUP,
2755                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2756                                           NULL,
2757                                           "cannot pop vlan after decap without "
2758                                           "match on inner vlan in the flow");
2759         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2760         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2761             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2762                 return rte_flow_error_set(error, ENOTSUP,
2763                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2764                                           NULL,
2765                                           "cannot pop vlan without a "
2766                                           "match on (outer) vlan in the flow");
2767         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2768                 return rte_flow_error_set(error, EINVAL,
2769                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2770                                           "wrong action order, port_id should "
2771                                           "be after pop VLAN action");
2772         if (!attr->transfer && priv->representor)
2773                 return rte_flow_error_set(error, ENOTSUP,
2774                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2775                                           "pop vlan action for VF representor "
2776                                           "not supported on NIC table");
2777         return 0;
2778 }
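
/*
 * A minimal sketch (illustrative, not part of the driver): an ingress rule
 * shape accepted by the pop VLAN checks above -- an outer VLAN match in the
 * pattern, a single VLAN action, and any port_id action placed after the
 * pop.
 */
static const struct rte_flow_item pop_vlan_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN }, /* Required outer VLAN match. */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action_queue pop_vlan_queue = { .index = 0 };
static const struct rte_flow_action pop_vlan_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &pop_vlan_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};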
2779
2780 /**
2781  * Get VLAN default info from the VLAN match info.
2782  *
2783  * @param[in] items
2784  *   The list of item specifications.
2785  * @param[out] vlan
2786  *   Pointer to the VLAN info to fill.
2787  */
2791 static void
2792 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2793                                   struct rte_vlan_hdr *vlan)
2794 {
2795         const struct rte_flow_item_vlan nic_mask = {
2796                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2797                                 MLX5DV_FLOW_VLAN_VID_MASK),
2798                 .inner_type = RTE_BE16(0xffff),
2799         };
2800
2801         if (items == NULL)
2802                 return;
2803         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2804                 int type = items->type;
2805
2806                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2807                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2808                         break;
2809         }
2810         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2811                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2812                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2813
2814                 /* If VLAN item in pattern doesn't contain data, return here. */
2815                 if (!vlan_v)
2816                         return;
2817                 if (!vlan_m)
2818                         vlan_m = &nic_mask;
2819                 /* Only full match values are accepted */
2820                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2821                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2822                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2823                         vlan->vlan_tci |=
2824                                 rte_be_to_cpu_16(vlan_v->tci &
2825                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2826                 }
2827                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2828                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2829                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2830                         vlan->vlan_tci |=
2831                                 rte_be_to_cpu_16(vlan_v->tci &
2832                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2833                 }
2834                 if (vlan_m->inner_type == nic_mask.inner_type)
2835                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2836                                                            vlan_m->inner_type);
2837         }
2838 }
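
/*
 * A small sketch of the TCI layout consumed above (illustrative helper, not
 * part of the driver): PCP sits in bits 15..13 and VID in bits 11..0, so a
 * full-match TCI of 0x6005 seeds PCP 3 and VID 5.
 */
static inline void
example_vlan_tci_split(uint16_t tci, uint8_t *pcp, uint16_t *vid)
{
	*pcp = (tci & MLX5DV_FLOW_VLAN_PCP_MASK) >> MLX5DV_FLOW_VLAN_PCP_SHIFT;
	*vid = tci & MLX5DV_FLOW_VLAN_VID_MASK;
}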
2839
2840 /**
2841  * Validate the push VLAN action.
2842  *
2843  * @param[in] dev
2844  *   Pointer to the rte_eth_dev structure.
2845  * @param[in] action_flags
2846  *   Holds the actions detected until now.
2847  * @param[in] vlan_m
2848  *   VLAN item mask from the flow pattern, if any.
2849  * @param[in] action
2850  *   Pointer to the action structure.
2851  * @param[in] attr
2852  *   Pointer to flow attributes
2853  * @param[out] error
2854  *   Pointer to error structure.
2855  *
2856  * @return
2857  *   0 on success, a negative errno value otherwise and rte_errno is set.
2858  */
2859 static int
2860 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2861                                   uint64_t action_flags,
2862                                   const struct rte_flow_item_vlan *vlan_m,
2863                                   const struct rte_flow_action *action,
2864                                   const struct rte_flow_attr *attr,
2865                                   struct rte_flow_error *error)
2866 {
2867         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2868         const struct mlx5_priv *priv = dev->data->dev_private;
2869         struct mlx5_dev_ctx_shared *sh = priv->sh;
2870         bool direction_error = false;
2871
2872         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2873             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2874                 return rte_flow_error_set(error, EINVAL,
2875                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2876                                           "invalid vlan ethertype");
2877         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2878                 return rte_flow_error_set(error, EINVAL,
2879                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2880                                           "wrong action order, port_id should "
2881                                           "be after push VLAN");
2882         /* Push VLAN is unsupported in ingress, except in CX6+ FDB mode. */
2883         if (attr->transfer) {
2884                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2885                 bool is_cx5 = sh->steering_format_version ==
2886                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2887
2888                 if (!fdb_tx && is_cx5)
2889                         direction_error = true;
2890         } else if (attr->ingress) {
2891                 direction_error = true;
2892         }
2893         if (direction_error)
2894                 return rte_flow_error_set(error, ENOTSUP,
2895                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2896                                           NULL,
2897                                           "push vlan action not supported for ingress");
2898         if (!attr->transfer && priv->representor)
2899                 return rte_flow_error_set(error, ENOTSUP,
2900                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2901                                           "push vlan action for VF representor "
2902                                           "not supported on NIC table");
2903         if (vlan_m &&
2904             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2905             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2906                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2907             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2908             !(mlx5_flow_find_action
2909                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2910                 return rte_flow_error_set(error, EINVAL,
2911                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2912                                           "not full match mask on VLAN PCP and "
2913                                           "there is no of_set_vlan_pcp action, "
2914                                           "push VLAN action cannot figure out "
2915                                           "PCP value");
2916         if (vlan_m &&
2917             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2918             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2919                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2920             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2921             !(mlx5_flow_find_action
2922                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2923                 return rte_flow_error_set(error, EINVAL,
2924                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2925                                           "not full match mask on VLAN VID and "
2926                                           "there is no of_set_vlan_vid action, "
2927                                           "push VLAN action cannot figure out "
2928                                           "VID value");
2930         return 0;
2931 }
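
/*
 * A minimal sketch (illustrative, not part of the driver): an egress action
 * sequence satisfying the push VLAN checks above -- PCP and VID are set
 * explicitly, so no full-match VLAN mask is required, and a port_id action,
 * if present, would have to come last.
 */
static const struct rte_flow_action_of_push_vlan push_vlan_conf = {
	.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
};
static const struct rte_flow_action_of_set_vlan_pcp push_vlan_pcp = {
	.vlan_pcp = 3,
};
static const struct rte_flow_action_of_set_vlan_vid push_vlan_vid = {
	.vlan_vid = RTE_BE16(100),
};
static const struct rte_flow_action push_vlan_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push_vlan_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
	  .conf = &push_vlan_pcp },
	{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
	  .conf = &push_vlan_vid },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};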
2932
2933 /**
2934  * Validate the set VLAN PCP.
2935  *
2936  * @param[in] action_flags
2937  *   Holds the actions detected until now.
2938  * @param[in] actions
2939  *   Pointer to the list of actions remaining in the flow rule.
2940  * @param[out] error
2941  *   Pointer to error structure.
2942  *
2943  * @return
2944  *   0 on success, a negative errno value otherwise and rte_errno is set.
2945  */
2946 static int
2947 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2948                                      const struct rte_flow_action actions[],
2949                                      struct rte_flow_error *error)
2950 {
2951         const struct rte_flow_action *action = actions;
2952         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2953
2954         if (conf->vlan_pcp > 7)
2955                 return rte_flow_error_set(error, EINVAL,
2956                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2957                                           "VLAN PCP value is too big");
2958         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2959                 return rte_flow_error_set(error, ENOTSUP,
2960                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2961                                           "set VLAN PCP action must follow "
2962                                           "the push VLAN action");
2963         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2964                 return rte_flow_error_set(error, ENOTSUP,
2965                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2966                                           "Multiple VLAN PCP modifications are "
2967                                           "not supported");
2968         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2969                 return rte_flow_error_set(error, EINVAL,
2970                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2971                                           "wrong action order, port_id should "
2972                                           "be after set VLAN PCP");
2973         return 0;
2974 }
2975
2976 /**
2977  * Validate the set VLAN VID.
2978  *
2979  * @param[in] item_flags
2980  *   Holds the items detected in this rule.
2981  * @param[in] action_flags
2982  *   Holds the actions detected until now.
2983  * @param[in] actions
2984  *   Pointer to the list of actions remaining in the flow rule.
2985  * @param[out] error
2986  *   Pointer to error structure.
2987  *
2988  * @return
2989  *   0 on success, a negative errno value otherwise and rte_errno is set.
2990  */
2991 static int
2992 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2993                                      uint64_t action_flags,
2994                                      const struct rte_flow_action actions[],
2995                                      struct rte_flow_error *error)
2996 {
2997         const struct rte_flow_action *action = actions;
2998         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2999
3000         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
3001                 return rte_flow_error_set(error, EINVAL,
3002                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3003                                           "VLAN VID value is too big");
3004         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3005             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3006                 return rte_flow_error_set(error, ENOTSUP,
3007                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3008                                           "set VLAN VID action must follow push"
3009                                           " VLAN action or match on VLAN item");
3010         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3011                 return rte_flow_error_set(error, ENOTSUP,
3012                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3013                                           "Multiple VLAN VID modifications are "
3014                                           "not supported");
3015         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3016                 return rte_flow_error_set(error, EINVAL,
3017                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3018                                           "wrong action order, port_id should "
3019                                           "be after set VLAN VID");
3020         return 0;
3021 }
3022
3023 /**
3024  * Validate the FLAG action.
3025  *
3026  * @param[in] dev
3027  *   Pointer to the rte_eth_dev structure.
3028  * @param[in] action_flags
3029  *   Holds the actions detected until now.
3030  * @param[in] attr
3031  *   Pointer to flow attributes
3032  * @param[out] error
3033  *   Pointer to error structure.
3034  *
3035  * @return
3036  *   0 on success, a negative errno value otherwise and rte_errno is set.
3037  */
3038 static int
3039 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3040                              uint64_t action_flags,
3041                              const struct rte_flow_attr *attr,
3042                              struct rte_flow_error *error)
3043 {
3044         struct mlx5_priv *priv = dev->data->dev_private;
3045         struct mlx5_sh_config *config = &priv->sh->config;
3046         int ret;
3047
3048         /* Fall back if no extended metadata register support. */
3049         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3050                 return mlx5_flow_validate_action_flag(action_flags, attr,
3051                                                       error);
3052         /* Extensive metadata mode requires registers. */
3053         if (!mlx5_flow_ext_mreg_supported(dev))
3054                 return rte_flow_error_set(error, ENOTSUP,
3055                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3056                                           "no metadata registers "
3057                                           "to support flag action");
3058         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3059                 return rte_flow_error_set(error, ENOTSUP,
3060                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3061                                           "extended metadata register"
3062                                           " isn't available");
3063         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3064         if (ret < 0)
3065                 return ret;
3066         MLX5_ASSERT(ret > 0);
3067         if (action_flags & MLX5_FLOW_ACTION_MARK)
3068                 return rte_flow_error_set(error, EINVAL,
3069                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3070                                           "can't mark and flag in same flow");
3071         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3072                 return rte_flow_error_set(error, EINVAL,
3073                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3074                                           "can't have 2 flag"
3075                                           " actions in same flow");
3076         return 0;
3077 }
3078
3079 /**
3080  * Validate MARK action.
3081  *
3082  * @param[in] dev
3083  *   Pointer to the rte_eth_dev structure.
3084  * @param[in] action
3085  *   Pointer to action.
3086  * @param[in] action_flags
3087  *   Holds the actions detected until now.
3088  * @param[in] attr
3089  *   Pointer to flow attributes
3090  * @param[out] error
3091  *   Pointer to error structure.
3092  *
3093  * @return
3094  *   0 on success, a negative errno value otherwise and rte_errno is set.
3095  */
3096 static int
3097 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3098                              const struct rte_flow_action *action,
3099                              uint64_t action_flags,
3100                              const struct rte_flow_attr *attr,
3101                              struct rte_flow_error *error)
3102 {
3103         struct mlx5_priv *priv = dev->data->dev_private;
3104         struct mlx5_sh_config *config = &priv->sh->config;
3105         const struct rte_flow_action_mark *mark = action->conf;
3106         int ret;
3107
3108         if (is_tunnel_offload_active(dev))
3109                 return rte_flow_error_set(error, ENOTSUP,
3110                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3111                                           "no mark action "
3112                                           "if tunnel offload active");
3113         /* Fall back if no extended metadata register support. */
3114         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3115                 return mlx5_flow_validate_action_mark(action, action_flags,
3116                                                       attr, error);
3117         /* Extensive metadata mode requires registers. */
3118         if (!mlx5_flow_ext_mreg_supported(dev))
3119                 return rte_flow_error_set(error, ENOTSUP,
3120                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3121                                           "no metadata registers "
3122                                           "to support mark action");
3123         if (!priv->sh->dv_mark_mask)
3124                 return rte_flow_error_set(error, ENOTSUP,
3125                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3126                                           "extended metadata register"
3127                                           " isn't available");
3128         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3129         if (ret < 0)
3130                 return ret;
3131         MLX5_ASSERT(ret > 0);
3132         if (!mark)
3133                 return rte_flow_error_set(error, EINVAL,
3134                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3135                                           "configuration cannot be null");
3136         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3137                 return rte_flow_error_set(error, EINVAL,
3138                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3139                                           &mark->id,
3140                                           "mark id exceeds the limit");
3141         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3142                 return rte_flow_error_set(error, EINVAL,
3143                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3144                                           "can't flag and mark in same flow");
3145         if (action_flags & MLX5_FLOW_ACTION_MARK)
3146                 return rte_flow_error_set(error, EINVAL,
3147                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3148                                           "can't have 2 mark actions in same"
3149                                           " flow");
3150         return 0;
3151 }
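
/*
 * A minimal sketch (illustrative, not part of the driver): a mark action
 * accepted by the checks above, assuming the id fits below
 * MLX5_FLOW_MARK_MAX masked by the probed dv_mark_mask; it cannot be
 * combined with a flag action in the same flow.
 */
static const struct rte_flow_action_mark mark_conf = { .id = 0xbeef };
static const struct rte_flow_action mark_action = {
	.type = RTE_FLOW_ACTION_TYPE_MARK,
	.conf = &mark_conf,
};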
3152
3153 /**
3154  * Validate SET_META action.
3155  *
3156  * @param[in] dev
3157  *   Pointer to the rte_eth_dev structure.
3158  * @param[in] action
3159  *   Pointer to the action structure.
3160  * @param[in] action_flags
3161  *   Holds the actions detected until now.
3162  * @param[in] attr
3163  *   Pointer to flow attributes
3164  * @param[out] error
3165  *   Pointer to error structure.
3166  *
3167  * @return
3168  *   0 on success, a negative errno value otherwise and rte_errno is set.
3169  */
3170 static int
3171 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3172                                  const struct rte_flow_action *action,
3173                                  uint64_t action_flags __rte_unused,
3174                                  const struct rte_flow_attr *attr,
3175                                  struct rte_flow_error *error)
3176 {
3177         struct mlx5_priv *priv = dev->data->dev_private;
3178         struct mlx5_sh_config *config = &priv->sh->config;
3179         const struct rte_flow_action_set_meta *conf;
3180         uint32_t nic_mask = UINT32_MAX;
3181         int reg;
3182
3183         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
3184             !mlx5_flow_ext_mreg_supported(dev))
3185                 return rte_flow_error_set(error, ENOTSUP,
3186                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3187                                           "extended metadata register"
3188                                           " isn't supported");
3189         reg = flow_dv_get_metadata_reg(dev, attr, error);
3190         if (reg < 0)
3191                 return reg;
3192         if (reg == REG_NON)
3193                 return rte_flow_error_set(error, ENOTSUP,
3194                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3195                                           "unavailable extended metadata register");
3196         if (reg != REG_A && reg != REG_B)
3197                 nic_mask = priv->sh->dv_meta_mask;
3201         if (!(action->conf))
3202                 return rte_flow_error_set(error, EINVAL,
3203                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3204                                           "configuration cannot be null");
3205         conf = (const struct rte_flow_action_set_meta *)action->conf;
3206         if (!conf->mask)
3207                 return rte_flow_error_set(error, EINVAL,
3208                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3209                                           "zero mask doesn't have any effect");
3210         if (conf->mask & ~nic_mask)
3211                 return rte_flow_error_set(error, EINVAL,
3212                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3213                                           "meta data must be within reg C0");
3214         return 0;
3215 }
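
/*
 * A minimal sketch (illustrative, not part of the driver): a SET_META
 * action shape accepted above -- the mask must be nonzero and, when the
 * metadata register is neither REG_A nor REG_B, must fit the probed
 * reg C0 bits.
 */
static const struct rte_flow_action_set_meta set_meta_conf = {
	.data = 0x1234,
	.mask = 0xffff, /* Nonzero and assumed within the NIC mask. */
};
static const struct rte_flow_action set_meta_action = {
	.type = RTE_FLOW_ACTION_TYPE_SET_META,
	.conf = &set_meta_conf,
};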
3216
3217 /**
3218  * Validate SET_TAG action.
3219  *
3220  * @param[in] dev
3221  *   Pointer to the rte_eth_dev structure.
3222  * @param[in] action
3223  *   Pointer to the action structure.
3224  * @param[in] action_flags
3225  *   Holds the actions detected until now.
3226  * @param[in] attr
3227  *   Pointer to flow attributes
3228  * @param[out] error
3229  *   Pointer to error structure.
3230  *
3231  * @return
3232  *   0 on success, a negative errno value otherwise and rte_errno is set.
3233  */
3234 static int
3235 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3236                                 const struct rte_flow_action *action,
3237                                 uint64_t action_flags,
3238                                 const struct rte_flow_attr *attr,
3239                                 struct rte_flow_error *error)
3240 {
3241         const struct rte_flow_action_set_tag *conf;
3242         const uint64_t terminal_action_flags =
3243                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3244                 MLX5_FLOW_ACTION_RSS;
3245         int ret;
3246
3247         if (!mlx5_flow_ext_mreg_supported(dev))
3248                 return rte_flow_error_set(error, ENOTSUP,
3249                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3250                                           "extensive metadata register"
3251                                           " isn't supported");
3252         if (!(action->conf))
3253                 return rte_flow_error_set(error, EINVAL,
3254                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3255                                           "configuration cannot be null");
3256         conf = (const struct rte_flow_action_set_tag *)action->conf;
3257         if (!conf->mask)
3258                 return rte_flow_error_set(error, EINVAL,
3259                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3260                                           "zero mask doesn't have any effect");
3261         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3262         if (ret < 0)
3263                 return ret;
3264         if (!attr->transfer && attr->ingress &&
3265             (action_flags & terminal_action_flags))
3266                 return rte_flow_error_set(error, EINVAL,
3267                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3268                                           "set_tag has no effect"
3269                                           " with terminal actions");
3270         return 0;
3271 }
3272
3273 /**
3274  * Validate count action.
3275  *
3276  * @param[in] dev
3277  *   Pointer to rte_eth_dev structure.
3278  * @param[in] shared
3279  *   Indicator if action is shared.
3280  * @param[in] action_flags
3281  *   Holds the actions detected until now.
3282  * @param[out] error
3283  *   Pointer to error structure.
3284  *
3285  * @return
3286  *   0 on success, a negative errno value otherwise and rte_errno is set.
3287  */
3288 static int
3289 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3290                               uint64_t action_flags,
3291                               struct rte_flow_error *error)
3292 {
3293         struct mlx5_priv *priv = dev->data->dev_private;
3294
3295         if (!priv->sh->cdev->config.devx)
3296                 goto notsup_err;
3297         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3298                 return rte_flow_error_set(error, EINVAL,
3299                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3300                                           "duplicate count actions set");
3301         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3302             !priv->sh->flow_hit_aso_en)
3303                 return rte_flow_error_set(error, EINVAL,
3304                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3305                                           "old age and shared count combination is not supported");
3306 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3307         return 0;
3308 #endif
3309 notsup_err:
3310         return rte_flow_error_set
3311                       (error, ENOTSUP,
3312                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3313                        NULL,
3314                        "count action not supported");
3315 }
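
/*
 * A minimal sketch (illustrative, not part of the driver): a per-rule count
 * action; the checks above reject a second count action in the same rule
 * and, without ASO flow-hit support, a shared count combined with the
 * legacy age action.
 */
static const struct rte_flow_action_count count_conf = { .id = 0 };
static const struct rte_flow_action count_action = {
	.type = RTE_FLOW_ACTION_TYPE_COUNT,
	.conf = &count_conf,
};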
3316
3317 /**
3318  * Validate the L2 encap action.
3319  *
3320  * @param[in] dev
3321  *   Pointer to the rte_eth_dev structure.
3322  * @param[in] action_flags
3323  *   Holds the actions detected until now.
3324  * @param[in] action
3325  *   Pointer to the action structure.
3326  * @param[in] attr
3327  *   Pointer to flow attributes.
3328  * @param[out] error
3329  *   Pointer to error structure.
3330  *
3331  * @return
3332  *   0 on success, a negative errno value otherwise and rte_errno is set.
3333  */
3334 static int
3335 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3336                                  uint64_t action_flags,
3337                                  const struct rte_flow_action *action,
3338                                  const struct rte_flow_attr *attr,
3339                                  struct rte_flow_error *error)
3340 {
3341         const struct mlx5_priv *priv = dev->data->dev_private;
3342
3343         if (!(action->conf))
3344                 return rte_flow_error_set(error, EINVAL,
3345                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3346                                           "configuration cannot be null");
3347         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3348                 return rte_flow_error_set(error, EINVAL,
3349                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3350                                           "can only have a single encap action "
3351                                           "in a flow");
3352         if (!attr->transfer && priv->representor)
3353                 return rte_flow_error_set(error, ENOTSUP,
3354                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3355                                           "encap action for VF representor "
3356                                           "not supported on NIC table");
3357         return 0;
3358 }
3359
3360 /**
3361  * Validate a decap action.
3362  *
3363  * @param[in] dev
3364  *   Pointer to the rte_eth_dev structure.
3365  * @param[in] action_flags
3366  *   Holds the actions detected until now.
3367  * @param[in] action
3368  *   Pointer to the action structure.
3369  * @param[in] item_flags
3370  *   Holds the items detected.
3371  * @param[in] attr
3372  *   Pointer to flow attributes
3373  * @param[out] error
3374  *   Pointer to error structure.
3375  *
3376  * @return
3377  *   0 on success, a negative errno value otherwise and rte_errno is set.
3378  */
3379 static int
3380 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3381                               uint64_t action_flags,
3382                               const struct rte_flow_action *action,
3383                               const uint64_t item_flags,
3384                               const struct rte_flow_attr *attr,
3385                               struct rte_flow_error *error)
3386 {
3387         const struct mlx5_priv *priv = dev->data->dev_private;
3388
3389         if (priv->sh->cdev->config.hca_attr.scatter_fcs_w_decap_disable &&
3390             !priv->sh->config.decap_en)
3391                 return rte_flow_error_set(error, ENOTSUP,
3392                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3393                                           "decap is not enabled");
3394         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3395                 return rte_flow_error_set(error, ENOTSUP,
3396                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3397                                           action_flags &
3398                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3399                                           "have a single decap action" : "decap "
3400                                           "after encap is not supported");
3401         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3402                 return rte_flow_error_set(error, EINVAL,
3403                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3404                                           "can't have decap action after"
3405                                           " modify action");
3406         if (attr->egress)
3407                 return rte_flow_error_set(error, ENOTSUP,
3408                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3409                                           NULL,
3410                                           "decap action not supported for "
3411                                           "egress");
3412         if (!attr->transfer && priv->representor)
3413                 return rte_flow_error_set(error, ENOTSUP,
3414                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3415                                           "decap action for VF representor "
3416                                           "not supported on NIC table");
3417         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3418             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3419                 return rte_flow_error_set(error, ENOTSUP,
3420                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3421                                 "VXLAN item should be present for VXLAN decap");
3422         return 0;
3423 }
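
/*
 * A minimal sketch (illustrative, not part of the driver): per the last
 * check above, VXLAN decap is only valid when the pattern actually matches
 * a VXLAN header.
 */
static const struct rte_flow_item vxlan_decap_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN }, /* Required for VXLAN decap. */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action vxlan_decap_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};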
3424
3425 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3426
3427 /**
3428  * Validate the raw encap and decap actions.
3429  *
3430  * @param[in] dev
3431  *   Pointer to the rte_eth_dev structure.
3432  * @param[in] decap
3433  *   Pointer to the decap action.
3434  * @param[in] encap
3435  *   Pointer to the encap action.
3436  * @param[in] attr
3437  *   Pointer to flow attributes
3438  * @param[in, out] action_flags
3439  *   Holds the actions detected until now.
3440  * @param[out] actions_n
3441  *   Pointer to the actions counter.
3442  * @param[in] action
3443  *   Pointer to the action structure.
3444  * @param[in] item_flags
3445  *   Holds the items detected.
3446  * @param[out] error
3447  *   Pointer to error structure.
3448  *
3449  * @return
3450  *   0 on success, a negative errno value otherwise and rte_errno is set.
3451  */
3452 static int
3453 flow_dv_validate_action_raw_encap_decap
3454         (struct rte_eth_dev *dev,
3455          const struct rte_flow_action_raw_decap *decap,
3456          const struct rte_flow_action_raw_encap *encap,
3457          const struct rte_flow_attr *attr, uint64_t *action_flags,
3458          int *actions_n, const struct rte_flow_action *action,
3459          uint64_t item_flags, struct rte_flow_error *error)
3460 {
3461         const struct mlx5_priv *priv = dev->data->dev_private;
3462         int ret;
3463
3464         if (encap && (!encap->size || !encap->data))
3465                 return rte_flow_error_set(error, EINVAL,
3466                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3467                                           "raw encap data cannot be empty");
3468         if (decap && encap) {
3469                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3470                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3471                         /* L3 encap. */
3472                         decap = NULL;
3473                 else if (encap->size <=
3474                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3475                            decap->size >
3476                            MLX5_ENCAPSULATION_DECISION_SIZE)
3477                         /* L3 decap. */
3478                         encap = NULL;
3479                 else if (encap->size >
3480                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3481                            decap->size >
3482                            MLX5_ENCAPSULATION_DECISION_SIZE)
3483                         /* 2 L2 actions: encap and decap. */
3484                         ;
3485                 else
3486                         return rte_flow_error_set(error,
3487                                 ENOTSUP,
3488                                 RTE_FLOW_ERROR_TYPE_ACTION,
3489                                 NULL, "unsupported combination: "
3490                                 "raw decap and raw encap sizes "
3491                                 "are both too small");
3492         }
3493         if (decap) {
3494                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3495                                                     item_flags, attr, error);
3496                 if (ret < 0)
3497                         return ret;
3498                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3499                 ++(*actions_n);
3500         }
3501         if (encap) {
3502                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3503                         return rte_flow_error_set(error, ENOTSUP,
3504                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3505                                                   NULL,
3506                                                   "small raw encap size");
3507                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3508                         return rte_flow_error_set(error, EINVAL,
3509                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3510                                                   NULL,
3511                                                   "more than one encap action");
3512                 if (!attr->transfer && priv->representor)
3513                         return rte_flow_error_set
3514                                         (error, ENOTSUP,
3515                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3516                                          "encap action for VF representor "
3517                                          "not supported on NIC table");
3518                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3519                 ++(*actions_n);
3520         }
3521         return 0;
3522 }
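
/*
 * A sketch of the size decision above (illustrative, not part of the
 * driver): MLX5_ENCAPSULATION_DECISION_SIZE is the L2-header threshold, so
 * pairing a small raw decap (outer L2 strip) with a large raw encap (full
 * tunnel header push) is treated as an L3 encap. Buffer contents below are
 * placeholders.
 */
static uint8_t raw_l2_hdr[RTE_ETHER_HDR_LEN];	/* <= decision size. */
static uint8_t raw_tunnel_hdr[64];		/* > decision size. */
static const struct rte_flow_action_raw_decap l3_encap_decap = {
	.data = raw_l2_hdr,
	.size = sizeof(raw_l2_hdr),
};
static const struct rte_flow_action_raw_encap l3_encap_encap = {
	.data = raw_tunnel_hdr,
	.size = sizeof(raw_tunnel_hdr),
};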
3523
3524 /**
3525  * Validate the ASO CT action.
3526  *
3527  * @param[in] dev
3528  *   Pointer to the rte_eth_dev structure.
3529  * @param[in] action_flags
3530  *   Holds the actions detected until now.
3531  * @param[in] item_flags
3532  *   The items found in this flow rule.
3533  * @param[in] attr
3534  *   Pointer to flow attributes.
3535  * @param[out] error
3536  *   Pointer to error structure.
3537  *
3538  * @return
3539  *   0 on success, a negative errno value otherwise and rte_errno is set.
3540  */
3541 static int
3542 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3543                                uint64_t action_flags,
3544                                uint64_t item_flags,
3545                                const struct rte_flow_attr *attr,
3546                                struct rte_flow_error *error)
3547 {
3548         RTE_SET_USED(dev);
3549
3550         if (attr->group == 0 && !attr->transfer)
3551                 return rte_flow_error_set(error, ENOTSUP,
3552                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3553                                           NULL,
3554                                           "Only non-root tables are supported");
3555         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3556                 return rte_flow_error_set(error, ENOTSUP,
3557                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3558                                           "CT cannot follow a fate action");
3559         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3560             (action_flags & MLX5_FLOW_ACTION_AGE))
3561                 return rte_flow_error_set(error, EINVAL,
3562                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3563                                           "Only one ASO action is supported");
3564         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3565                 return rte_flow_error_set(error, EINVAL,
3566                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3567                                           "Encap cannot exist before CT");
3568         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3569                 return rte_flow_error_set(error, EINVAL,
3570                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3571                                           "Not an outer TCP packet");
3572         return 0;
3573 }
3574
3575 int
3576 flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
3577                              struct mlx5_list_entry *entry, void *cb_ctx)
3578 {
3579         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3580         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3581         struct mlx5_flow_dv_encap_decap_resource *resource;
3582
3583         resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
3584                                 entry);
3585         if (resource->reformat_type == ctx_resource->reformat_type &&
3586             resource->ft_type == ctx_resource->ft_type &&
3587             resource->flags == ctx_resource->flags &&
3588             resource->size == ctx_resource->size &&
3589             !memcmp((const void *)resource->buf,
3590                     (const void *)ctx_resource->buf,
3591                     resource->size))
3592                 return 0;
3593         return -1;
3594 }
3595
3596 struct mlx5_list_entry *
3597 flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
3598 {
3599         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3600         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3601         struct mlx5dv_dr_domain *domain;
3602         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3603         struct mlx5_flow_dv_encap_decap_resource *resource;
3604         uint32_t idx;
3605         int ret;
3606
3607         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3608                 domain = sh->fdb_domain;
3609         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3610                 domain = sh->rx_domain;
3611         else
3612                 domain = sh->tx_domain;
3613         /* Register new encap/decap resource. */
3614         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
3615         if (!resource) {
3616                 rte_flow_error_set(ctx->error, ENOMEM,
3617                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3618                                    "cannot allocate resource memory");
3619                 return NULL;
3620         }
3621         *resource = *ctx_resource;
3622         resource->idx = idx;
3623         ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->cdev->ctx,
3624                                                               domain, resource,
3625                                                              &resource->action);
3626         if (ret) {
3627                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3628                 rte_flow_error_set(ctx->error, ENOMEM,
3629                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3630                                    NULL, "cannot create action");
3631                 return NULL;
3632         }
3633
3634         return &resource->entry;
3635 }
3636
3637 struct mlx5_list_entry *
3638 flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
3639                              void *cb_ctx)
3640 {
3641         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3642         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3643         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3644         uint32_t idx;
3645
3646         cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3647                                            &idx);
3648         if (!cache_resource) {
3649                 rte_flow_error_set(ctx->error, ENOMEM,
3650                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3651                                    "cannot allocate resource memory");
3652                 return NULL;
3653         }
3654         memcpy(cache_resource, oentry, sizeof(*cache_resource));
3655         cache_resource->idx = idx;
3656         return &cache_resource->entry;
3657 }
3658
3659 void
3660 flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3661 {
3662         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3663         struct mlx5_flow_dv_encap_decap_resource *res =
3664                                        container_of(entry, typeof(*res), entry);
3665
3666         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
3667 }
3668
3669 /**
3670  * Find existing encap/decap resource or create and register a new one.
3671  *
3672  * @param[in, out] dev
3673  *   Pointer to rte_eth_dev structure.
3674  * @param[in, out] resource
3675  *   Pointer to encap/decap resource.
3676  * @param[in, out] dev_flow
3677  *   Pointer to the dev_flow.
3678  * @param[out] error
3679  *   Pointer to error structure.
3680  *
3681  * @return
3682  *   0 on success, otherwise -errno and errno is set.
3683  */
3684 static int
3685 flow_dv_encap_decap_resource_register
3686                         (struct rte_eth_dev *dev,
3687                          struct mlx5_flow_dv_encap_decap_resource *resource,
3688                          struct mlx5_flow *dev_flow,
3689                          struct rte_flow_error *error)
3690 {
3691         struct mlx5_priv *priv = dev->data->dev_private;
3692         struct mlx5_dev_ctx_shared *sh = priv->sh;
3693         struct mlx5_list_entry *entry;
3694         union {
3695                 struct {
3696                         uint32_t ft_type:8;
3697                         uint32_t refmt_type:8;
3698                         /*
3699                          * Header reformat actions can be shared between
3700                          * non-root tables. One bit indicates whether
3701                          * the table is non-root.
3702                          */
3703                         uint32_t is_root:1;
3704                         uint32_t reserve:15;
3705                 };
3706                 uint32_t v32;
3707         } encap_decap_key = {
3708                 {
3709                         .ft_type = resource->ft_type,
3710                         .refmt_type = resource->reformat_type,
3711                         .is_root = !!dev_flow->dv.group,
3712                         .reserve = 0,
3713                 }
3714         };
3715         struct mlx5_flow_cb_ctx ctx = {
3716                 .error = error,
3717                 .data = resource,
3718         };
3719         struct mlx5_hlist *encaps_decaps;
3720         uint64_t key64;
3721
3722         encaps_decaps = flow_dv_hlist_prepare(sh, &sh->encaps_decaps,
3723                                 "encaps_decaps",
3724                                 MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
3725                                 true, true, sh,
3726                                 flow_dv_encap_decap_create_cb,
3727                                 flow_dv_encap_decap_match_cb,
3728                                 flow_dv_encap_decap_remove_cb,
3729                                 flow_dv_encap_decap_clone_cb,
3730                                 flow_dv_encap_decap_clone_free_cb);
3731         if (unlikely(!encaps_decaps))
3732                 return -rte_errno;
3733         resource->flags = dev_flow->dv.group ? 0 : 1;
3734         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3735                                 sizeof(encap_decap_key.v32), 0);
3736         if (resource->reformat_type !=
3737             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3738             resource->size)
3739                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3740         entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
3741         if (!entry)
3742                 return -rte_errno;
3743         resource = container_of(entry, typeof(*resource), entry);
3744         dev_flow->dv.encap_decap = resource;
3745         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3746         return 0;
3747 }
3748
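     /*
      * Note on the key above: the 64-bit hash folds the packed 32-bit
      * ft_type/reformat_type/root key together with the reformat buffer
      * using __rte_raw_cksum(), the incremental 16-bit raw checksum
      * helper from rte_ip.h. The hash only needs to be cheap, not
      * collision-free: colliding entries are disambiguated by
      * flow_dv_encap_decap_match_cb(), which compares every field and the
      * raw buffer. A minimal sketch, assuming caller-provided v32, buf
      * and size:
      *
      *     uint64_t key64 = __rte_raw_cksum(&v32, sizeof(v32), 0);
      *
      *     if (size)
      *             key64 = __rte_raw_cksum(buf, size, key64);
      */
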
3749 /**
3750  * Find existing table jump resource or create and register a new one.
3751  *
3752  * @param[in, out] dev
3753  *   Pointer to rte_eth_dev structure.
3754  * @param[in, out] tbl
3755  *   Pointer to flow table resource.
3756  * @param[in, out] dev_flow
3757  *   Pointer to the dev_flow.
3758  * @param[out] error
3759  *   Pointer to error structure.
3760  *
3761  * @return
3762  *   0 on success, otherwise -errno and errno is set.
3763  */
3764 static int
3765 flow_dv_jump_tbl_resource_register
3766                         (struct rte_eth_dev *dev __rte_unused,
3767                          struct mlx5_flow_tbl_resource *tbl,
3768                          struct mlx5_flow *dev_flow,
3769                          struct rte_flow_error *error __rte_unused)
3770 {
3771         struct mlx5_flow_tbl_data_entry *tbl_data =
3772                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3773
3774         MLX5_ASSERT(tbl);
3775         MLX5_ASSERT(tbl_data->jump.action);
3776         dev_flow->handle->rix_jump = tbl_data->idx;
3777         dev_flow->dv.jump = &tbl_data->jump;
3778         return 0;
3779 }
3780
3781 int
3782 flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
3783                          struct mlx5_list_entry *entry, void *cb_ctx)
3784 {
3785         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3786         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3787         struct mlx5_flow_dv_port_id_action_resource *res =
3788                                        container_of(entry, typeof(*res), entry);
3789
3790         return ref->port_id != res->port_id;
3791 }
3792
3793 struct mlx5_list_entry *
3794 flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
3795 {
3796         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3797         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3798         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3799         struct mlx5_flow_dv_port_id_action_resource *resource;
3800         uint32_t idx;
3801         int ret;
3802
3803         /* Register new port id action resource. */
3804         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3805         if (!resource) {
3806                 rte_flow_error_set(ctx->error, ENOMEM,
3807                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3808                                    "cannot allocate port_id action memory");
3809                 return NULL;
3810         }
3811         *resource = *ref;
3812         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3813                                                         ref->port_id,
3814                                                         &resource->action);
3815         if (ret) {
3816                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3817                 rte_flow_error_set(ctx->error, ENOMEM,
3818                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3819                                    "cannot create action");
3820                 return NULL;
3821         }
3822         resource->idx = idx;
3823         return &resource->entry;
3824 }
3825
3826 struct mlx5_list_entry *
3827 flow_dv_port_id_clone_cb(void *tool_ctx,
3828                          struct mlx5_list_entry *entry __rte_unused,
3829                          void *cb_ctx)
3830 {
3831         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3832         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3833         struct mlx5_flow_dv_port_id_action_resource *resource;
3834         uint32_t idx;
3835
3836         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3837         if (!resource) {
3838                 rte_flow_error_set(ctx->error, ENOMEM,
3839                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3840                                    "cannot allocate port_id action memory");
3841                 return NULL;
3842         }
3843         memcpy(resource, entry, sizeof(*resource));
3844         resource->idx = idx;
3845         return &resource->entry;
3846 }
3847
3848 void
3849 flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3850 {
3851         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3852         struct mlx5_flow_dv_port_id_action_resource *resource =
3853                                   container_of(entry, typeof(*resource), entry);
3854
3855         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3856 }
3857
3858 /**
3859  * Find existing table port ID resource or create and register a new one.
3860  *
3861  * @param[in, out] dev
3862  *   Pointer to rte_eth_dev structure.
3863  * @param[in, out] ref
3864  *   Pointer to port ID action resource reference.
3865  * @param[in, out] dev_flow
3866  *   Pointer to the dev_flow.
3867  * @param[out] error
3868  *   Pointer to error structure.
3869  *
3870  * @return
3871  *   0 on success, otherwise -errno and errno is set.
3872  */
3873 static int
3874 flow_dv_port_id_action_resource_register
3875                         (struct rte_eth_dev *dev,
3876                          struct mlx5_flow_dv_port_id_action_resource *ref,
3877                          struct mlx5_flow *dev_flow,
3878                          struct rte_flow_error *error)
3879 {
3880         struct mlx5_priv *priv = dev->data->dev_private;
3881         struct mlx5_list_entry *entry;
3882         struct mlx5_flow_dv_port_id_action_resource *resource;
3883         struct mlx5_flow_cb_ctx ctx = {
3884                 .error = error,
3885                 .data = ref,
3886         };
3887
3888         entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
3889         if (!entry)
3890                 return -rte_errno;
3891         resource = container_of(entry, typeof(*resource), entry);
3892         dev_flow->dv.port_id_action = resource;
3893         dev_flow->handle->rix_port_id_action = resource->idx;
3894         return 0;
3895 }
3896
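     /*
      * Example (illustrative): a transfer rule whose fate is a PORT_ID
      * action backed by the shared resource registered above. Two flows
      * steering to the same port reuse one mlx5dv dest-port action,
      * because flow_dv_port_id_match_cb() compares on port_id only. The
      * port number is a placeholder.
      *
      *     struct rte_flow_attr attr = { .transfer = 1 };
      *     struct rte_flow_action_port_id conf = { .id = 1 };
      *     struct rte_flow_action actions[] = {
      *             { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &conf },
      *             { .type = RTE_FLOW_ACTION_TYPE_END },
      *     };
      */
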
3897 int
3898 flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
3899                            struct mlx5_list_entry *entry, void *cb_ctx)
3900 {
3901         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3902         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3903         struct mlx5_flow_dv_push_vlan_action_resource *res =
3904                                        container_of(entry, typeof(*res), entry);
3905
3906         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3907 }
3908
3909 struct mlx5_list_entry *
3910 flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
3911 {
3912         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3913         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3914         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3915         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3916         struct mlx5dv_dr_domain *domain;
3917         uint32_t idx;
3918         int ret;
3919
3920         /* Register new push VLAN action resource. */
3921         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3922         if (!resource) {
3923                 rte_flow_error_set(ctx->error, ENOMEM,
3924                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3925                                    "cannot allocate push_vlan action memory");
3926                 return NULL;
3927         }
3928         *resource = *ref;
3929         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3930                 domain = sh->fdb_domain;
3931         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3932                 domain = sh->rx_domain;
3933         else
3934                 domain = sh->tx_domain;
3935         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3936                                                         &resource->action);
3937         if (ret) {
3938                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3939                 rte_flow_error_set(ctx->error, ENOMEM,
3940                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3941                                    "cannot create push vlan action");
3942                 return NULL;
3943         }
3944         resource->idx = idx;
3945         return &resource->entry;
3946 }
3947
3948 struct mlx5_list_entry *
3949 flow_dv_push_vlan_clone_cb(void *tool_ctx,
3950                            struct mlx5_list_entry *entry __rte_unused,
3951                            void *cb_ctx)
3952 {
3953         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3954         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3955         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3956         uint32_t idx;
3957
3958         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3959         if (!resource) {
3960                 rte_flow_error_set(ctx->error, ENOMEM,
3961                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3962                                    "cannot allocate push_vlan action memory");
3963                 return NULL;
3964         }
3965         memcpy(resource, entry, sizeof(*resource));
3966         resource->idx = idx;
3967         return &resource->entry;
3968 }
3969
3970 void
3971 flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3972 {
3973         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3974         struct mlx5_flow_dv_push_vlan_action_resource *resource =
3975                                   container_of(entry, typeof(*resource), entry);
3976
3977         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
3978 }
3979
3980 /**
3981  * Find existing push vlan resource or create and register a new one.
3982  *
3983  * @param[in, out] dev
3984  *   Pointer to rte_eth_dev structure.
3985  * @param[in, out] ref
3986  *   Pointer to push VLAN action resource reference.
3987  * @param[in, out] dev_flow
3988  *   Pointer to the dev_flow.
3989  * @param[out] error
3990  *   Pointer to error structure.
3991  *
3992  * @return
3993  *   0 on success, otherwise -errno and errno is set.
3994  */
3995 static int
3996 flow_dv_push_vlan_action_resource_register
3997                        (struct rte_eth_dev *dev,
3998                         struct mlx5_flow_dv_push_vlan_action_resource *ref,
3999                         struct mlx5_flow *dev_flow,
4000                         struct rte_flow_error *error)
4001 {
4002         struct mlx5_priv *priv = dev->data->dev_private;
4003         struct mlx5_flow_dv_push_vlan_action_resource *resource;
4004         struct mlx5_list_entry *entry;
4005         struct mlx5_flow_cb_ctx ctx = {
4006                 .error = error,
4007                 .data = ref,
4008         };
4009
4010         entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
4011         if (!entry)
4012                 return -rte_errno;
4013         resource = container_of(entry, typeof(*resource), entry);
4014
4015         dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4016         dev_flow->dv.push_vlan_res = resource;
4017         return 0;
4018 }
4019
4020 /**
4021  * Get the header size of a specific rte_flow_item_type.
4022  *
4023  * @param[in] item_type
4024  *   Tested rte_flow_item_type.
4025  *
4026  * @return
4027  *   Size of the item type's header struct, 0 if void or irrelevant.
4028  */
4029 static size_t
4030 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4031 {
4032         size_t retval;
4033
4034         switch (item_type) {
4035         case RTE_FLOW_ITEM_TYPE_ETH:
4036                 retval = sizeof(struct rte_ether_hdr);
4037                 break;
4038         case RTE_FLOW_ITEM_TYPE_VLAN:
4039                 retval = sizeof(struct rte_vlan_hdr);
4040                 break;
4041         case RTE_FLOW_ITEM_TYPE_IPV4:
4042                 retval = sizeof(struct rte_ipv4_hdr);
4043                 break;
4044         case RTE_FLOW_ITEM_TYPE_IPV6:
4045                 retval = sizeof(struct rte_ipv6_hdr);
4046                 break;
4047         case RTE_FLOW_ITEM_TYPE_UDP:
4048                 retval = sizeof(struct rte_udp_hdr);
4049                 break;
4050         case RTE_FLOW_ITEM_TYPE_TCP:
4051                 retval = sizeof(struct rte_tcp_hdr);
4052                 break;
4053         case RTE_FLOW_ITEM_TYPE_VXLAN:
4054         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4055                 retval = sizeof(struct rte_vxlan_hdr);
4056                 break;
4057         case RTE_FLOW_ITEM_TYPE_GRE:
4058         case RTE_FLOW_ITEM_TYPE_NVGRE:
4059                 retval = sizeof(struct rte_gre_hdr);
4060                 break;
4061         case RTE_FLOW_ITEM_TYPE_MPLS:
4062                 retval = sizeof(struct rte_mpls_hdr);
4063                 break;
4064         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4065         default:
4066                 retval = 0;
4067                 break;
4068         }
4069         return retval;
4070 }
4071
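     /*
      * Example: for a typical VXLAN encapsulation definition the
      * accumulated header length is sizeof(struct rte_ether_hdr) +
      * sizeof(struct rte_ipv4_hdr) + sizeof(struct rte_udp_hdr) +
      * sizeof(struct rte_vxlan_hdr) = 14 + 20 + 8 + 8 = 50 bytes, well
      * within MLX5_ENCAP_MAX_LEN. A sketch of the accumulation:
      *
      *     size_t len = 0;
      *
      *     for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
      *             len += flow_dv_get_item_hdr_len(items->type);
      */
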
4072 #define MLX5_ENCAP_IPV4_VERSION         0x40
4073 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
4074 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
4075 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
4076 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
4077 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
4078 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
4079
4080 /**
4081  * Convert the encap action data from list of rte_flow_item to raw buffer
4082  *
4083  * @param[in] items
4084  *   Pointer to rte_flow_item objects list.
4085  * @param[out] buf
4086  *   Pointer to the output buffer.
4087  * @param[out] size
4088  *   Pointer to the output buffer size.
4089  * @param[out] error
4090  *   Pointer to the error structure.
4091  *
4092  * @return
4093  *   0 on success, a negative errno value otherwise and rte_errno is set.
4094  */
4095 static int
4096 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4097                            size_t *size, struct rte_flow_error *error)
4098 {
4099         struct rte_ether_hdr *eth = NULL;
4100         struct rte_vlan_hdr *vlan = NULL;
4101         struct rte_ipv4_hdr *ipv4 = NULL;
4102         struct rte_ipv6_hdr *ipv6 = NULL;
4103         struct rte_udp_hdr *udp = NULL;
4104         struct rte_vxlan_hdr *vxlan = NULL;
4105         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4106         struct rte_gre_hdr *gre = NULL;
4107         size_t len;
4108         size_t temp_size = 0;
4109
4110         if (!items)
4111                 return rte_flow_error_set(error, EINVAL,
4112                                           RTE_FLOW_ERROR_TYPE_ACTION,
4113                                           NULL, "invalid empty data");
4114         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4115                 len = flow_dv_get_item_hdr_len(items->type);
4116                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4117                         return rte_flow_error_set(error, EINVAL,
4118                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4119                                                   (void *)items->type,
4120                                                   "items total size is too big"
4121                                                   " for encap action");
4122                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4123                 switch (items->type) {
4124                 case RTE_FLOW_ITEM_TYPE_ETH:
4125                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4126                         break;
4127                 case RTE_FLOW_ITEM_TYPE_VLAN:
4128                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4129                         if (!eth)
4130                                 return rte_flow_error_set(error, EINVAL,
4131                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4132                                                 (void *)items->type,
4133                                                 "eth header not found");
4134                         if (!eth->ether_type)
4135                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4136                         break;
4137                 case RTE_FLOW_ITEM_TYPE_IPV4:
4138                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4139                         if (!vlan && !eth)
4140                                 return rte_flow_error_set(error, EINVAL,
4141                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4142                                                 (void *)items->type,
4143                                                 "neither eth nor vlan"
4144                                                 " header found");
4145                         if (vlan && !vlan->eth_proto)
4146                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4147                         else if (eth && !eth->ether_type)
4148                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4149                         if (!ipv4->version_ihl)
4150                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4151                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4152                         if (!ipv4->time_to_live)
4153                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4154                         break;
4155                 case RTE_FLOW_ITEM_TYPE_IPV6:
4156                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4157                         if (!vlan && !eth)
4158                                 return rte_flow_error_set(error, EINVAL,
4159                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4160                                                 (void *)items->type,
4161                                                 "neither eth nor vlan"
4162                                                 " header found");
4163                         if (vlan && !vlan->eth_proto)
4164                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4165                         else if (eth && !eth->ether_type)
4166                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4167                         if (!ipv6->vtc_flow)
4168                                 ipv6->vtc_flow =
4169                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4170                         if (!ipv6->hop_limits)
4171                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4172                         break;
4173                 case RTE_FLOW_ITEM_TYPE_UDP:
4174                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4175                         if (!ipv4 && !ipv6)
4176                                 return rte_flow_error_set(error, EINVAL,
4177                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4178                                                 (void *)items->type,
4179                                                 "ip header not found");
4180                         if (ipv4 && !ipv4->next_proto_id)
4181                                 ipv4->next_proto_id = IPPROTO_UDP;
4182                         else if (ipv6 && !ipv6->proto)
4183                                 ipv6->proto = IPPROTO_UDP;
4184                         break;
4185                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4186                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4187                         if (!udp)
4188                                 return rte_flow_error_set(error, EINVAL,
4189                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4190                                                 (void *)items->type,
4191                                                 "udp header not found");
4192                         if (!udp->dst_port)
4193                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4194                         if (!vxlan->vx_flags)
4195                                 vxlan->vx_flags =
4196                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4197                         break;
4198                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4199                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4200                         if (!udp)
4201                                 return rte_flow_error_set(error, EINVAL,
4202                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4203                                                 (void *)items->type,
4204                                                 "udp header not found");
4205                         if (!vxlan_gpe->proto)
4206                                 return rte_flow_error_set(error, EINVAL,
4207                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4208                                                 (void *)items->type,
4209                                                 "next protocol not found");
4210                         if (!udp->dst_port)
4211                                 udp->dst_port =
4212                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4213                         if (!vxlan_gpe->vx_flags)
4214                                 vxlan_gpe->vx_flags =
4215                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4216                         break;
4217                 case RTE_FLOW_ITEM_TYPE_GRE:
4218                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4219                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4220                         if (!gre->proto)
4221                                 return rte_flow_error_set(error, EINVAL,
4222                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4223                                                 (void *)items->type,
4224                                                 "next protocol not found");
4225                         if (!ipv4 && !ipv6)
4226                                 return rte_flow_error_set(error, EINVAL,
4227                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4228                                                 (void *)items->type,
4229                                                 "ip header not found");
4230                         if (ipv4 && !ipv4->next_proto_id)
4231                                 ipv4->next_proto_id = IPPROTO_GRE;
4232                         else if (ipv6 && !ipv6->proto)
4233                                 ipv6->proto = IPPROTO_GRE;
4234                         break;
4235                 case RTE_FLOW_ITEM_TYPE_VOID:
4236                         break;
4237                 default:
4238                         return rte_flow_error_set(error, EINVAL,
4239                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4240                                                   (void *)items->type,
4241                                                   "unsupported item type");
4242                         break;
4243                 }
4244                 temp_size += len;
4245         }
4246         *size = temp_size;
4247         return 0;
4248 }
4249
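     /*
      * Example (illustrative): a minimal VXLAN encap definition for the
      * converter above. Fields left zero in the specs are filled with
      * defaults by the loop: the Ethernet type becomes IPv4, IPv4
      * version/IHL and TTL are set, the UDP destination port defaults to
      * MLX5_UDP_PORT_VXLAN (4789) and the VXLAN VNI-valid flag is set.
      * Addresses and the VNI are placeholders.
      *
      *     struct rte_flow_item_eth eth = {
      *             .dst.addr_bytes = { 0x02, 0, 0, 0, 0, 0x02 },
      *             .src.addr_bytes = { 0x02, 0, 0, 0, 0, 0x01 },
      *     };
      *     struct rte_flow_item_ipv4 ipv4 = {
      *             .hdr.src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
      *             .hdr.dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 2)),
      *     };
      *     struct rte_flow_item_udp udp = { .hdr.src_port = 0 };
      *     struct rte_flow_item_vxlan vxlan = { .vni = { 0, 0, 42 } };
      *     struct rte_flow_item items[] = {
      *             { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
      *             { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
      *             { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
      *             { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
      *             { .type = RTE_FLOW_ITEM_TYPE_END },
      *     };
      */
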
4250 static int
4251 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4252 {
4253         struct rte_ether_hdr *eth = NULL;
4254         struct rte_vlan_hdr *vlan = NULL;
4255         struct rte_ipv6_hdr *ipv6 = NULL;
4256         struct rte_udp_hdr *udp = NULL;
4257         char *next_hdr;
4258         uint16_t proto;
4259
4260         eth = (struct rte_ether_hdr *)data;
4261         next_hdr = (char *)(eth + 1);
4262         proto = RTE_BE16(eth->ether_type);
4263
4264         /* VLAN skipping */
4265         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4266                 vlan = (struct rte_vlan_hdr *)next_hdr;
4267                 proto = RTE_BE16(vlan->eth_proto);
4268                 next_hdr += sizeof(struct rte_vlan_hdr);
4269         }
4270
4271         /* HW calculates IPv4 csum. No need to proceed. */
4272         if (proto == RTE_ETHER_TYPE_IPV4)
4273                 return 0;
4274
4275         /* Non IPv4/IPv6 header. Not supported. */
4276         if (proto != RTE_ETHER_TYPE_IPV6) {
4277                 return rte_flow_error_set(error, ENOTSUP,
4278                                           RTE_FLOW_ERROR_TYPE_ACTION,
4279                                           NULL, "Cannot offload non IPv4/IPv6");
4280         }
4281
4282         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4283
4284         /* Ignore non-UDP. */
4285         if (ipv6->proto != IPPROTO_UDP)
4286                 return 0;
4287
4288         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4289         udp->dgram_cksum = 0;
4290
4291         return 0;
4292 }
4293
4294 /**
4295  * Convert L2 encap action to DV specification.
4296  *
4297  * @param[in] dev
4298  *   Pointer to rte_eth_dev structure.
4299  * @param[in] action
4300  *   Pointer to action structure.
4301  * @param[in, out] dev_flow
4302  *   Pointer to the mlx5_flow.
4303  * @param[in] transfer
4304  *   Mark if the flow is an E-Switch flow.
4305  * @param[out] error
4306  *   Pointer to the error structure.
4307  *
4308  * @return
4309  *   0 on success, a negative errno value otherwise and rte_errno is set.
4310  */
4311 static int
4312 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4313                                const struct rte_flow_action *action,
4314                                struct mlx5_flow *dev_flow,
4315                                uint8_t transfer,
4316                                struct rte_flow_error *error)
4317 {
4318         const struct rte_flow_item *encap_data;
4319         const struct rte_flow_action_raw_encap *raw_encap_data;
4320         struct mlx5_flow_dv_encap_decap_resource res = {
4321                 .reformat_type =
4322                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4323                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4324                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4325         };
4326
4327         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4328                 raw_encap_data =
4329                         (const struct rte_flow_action_raw_encap *)action->conf;
4330                 res.size = raw_encap_data->size;
4331                 memcpy(res.buf, raw_encap_data->data, res.size);
4332         } else {
4333                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4334                         encap_data =
4335                                 ((const struct rte_flow_action_vxlan_encap *)
4336                                                 action->conf)->definition;
4337                 else
4338                         encap_data =
4339                                 ((const struct rte_flow_action_nvgre_encap *)
4340                                                 action->conf)->definition;
4341                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4342                                                &res.size, error))
4343                         return -rte_errno;
4344         }
4345         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4346                 return -rte_errno;
4347         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4348                 return rte_flow_error_set(error, EINVAL,
4349                                           RTE_FLOW_ERROR_TYPE_ACTION,
4350                                           NULL, "can't create L2 encap action");
4351         return 0;
4352 }
4353
4354 /**
4355  * Convert L2 decap action to DV specification.
4356  *
4357  * @param[in] dev
4358  *   Pointer to rte_eth_dev structure.
4359  * @param[in, out] dev_flow
4360  *   Pointer to the mlx5_flow.
4361  * @param[in] transfer
4362  *   Mark if the flow is an E-Switch flow.
4363  * @param[out] error
4364  *   Pointer to the error structure.
4365  *
4366  * @return
4367  *   0 on success, a negative errno value otherwise and rte_errno is set.
4368  */
4369 static int
4370 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4371                                struct mlx5_flow *dev_flow,
4372                                uint8_t transfer,
4373                                struct rte_flow_error *error)
4374 {
4375         struct mlx5_flow_dv_encap_decap_resource res = {
4376                 .size = 0,
4377                 .reformat_type =
4378                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4379                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4380                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4381         };
4382
4383         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4384                 return rte_flow_error_set(error, EINVAL,
4385                                           RTE_FLOW_ERROR_TYPE_ACTION,
4386                                           NULL, "can't create L2 decap action");
4387         return 0;
4388 }
4389
4390 /**
4391  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4392  *
4393  * @param[in] dev
4394  *   Pointer to rte_eth_dev structure.
4395  * @param[in] action
4396  *   Pointer to action structure.
4397  * @param[in, out] dev_flow
4398  *   Pointer to the mlx5_flow.
4399  * @param[in] attr
4400  *   Pointer to the flow attributes.
4401  * @param[out] error
4402  *   Pointer to the error structure.
4403  *
4404  * @return
4405  *   0 on success, a negative errno value otherwise and rte_errno is set.
4406  */
4407 static int
4408 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4409                                 const struct rte_flow_action *action,
4410                                 struct mlx5_flow *dev_flow,
4411                                 const struct rte_flow_attr *attr,
4412                                 struct rte_flow_error *error)
4413 {
4414         const struct rte_flow_action_raw_encap *encap_data;
4415         struct mlx5_flow_dv_encap_decap_resource res;
4416
4417         memset(&res, 0, sizeof(res));
4418         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4419         res.size = encap_data->size;
4420         memcpy(res.buf, encap_data->data, res.size);
4421         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4422                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4423                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4424         if (attr->transfer)
4425                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4426         else
4427                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4428                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4429         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4430                 return rte_flow_error_set(error, EINVAL,
4431                                           RTE_FLOW_ERROR_TYPE_ACTION,
4432                                           NULL, "can't create encap action");
4433         return 0;
4434 }
4435
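     /*
      * Example (illustrative): the reformat type above is chosen purely
      * by buffer size. A RAW_ENCAP whose data holds only a new Ethernet
      * header (below MLX5_ENCAPSULATION_DECISION_SIZE) is programmed as
      * L3_TUNNEL_TO_L2, i.e. the L2 header to prepend after an L3 tunnel
      * decap, while a full ETH/IP/UDP/tunnel stack is programmed as
      * L2_TO_L3_TUNNEL. The buffer contents are placeholders to be built
      * by the application.
      *
      *     static uint8_t new_l2[sizeof(struct rte_ether_hdr)];
      *     struct rte_flow_action_raw_encap conf = {
      *             .data = new_l2,
      *             .size = sizeof(new_l2),
      *     };
      */
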
4436 /**
4437  * Create action push VLAN.
4438  *
4439  * @param[in] dev
4440  *   Pointer to rte_eth_dev structure.
4441  * @param[in] attr
4442  *   Pointer to the flow attributes.
4443  * @param[in] vlan
4444  *   Pointer to the vlan to push to the Ethernet header.
4445  * @param[in, out] dev_flow
4446  *   Pointer to the mlx5_flow.
4447  * @param[out] error
4448  *   Pointer to the error structure.
4449  *
4450  * @return
4451  *   0 on success, a negative errno value otherwise and rte_errno is set.
4452  */
4453 static int
4454 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4455                                 const struct rte_flow_attr *attr,
4456                                 const struct rte_vlan_hdr *vlan,
4457                                 struct mlx5_flow *dev_flow,
4458                                 struct rte_flow_error *error)
4459 {
4460         struct mlx5_flow_dv_push_vlan_action_resource res;
4461
4462         memset(&res, 0, sizeof(res));
4463         res.vlan_tag =
4464                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4465                                  vlan->vlan_tci);
4466         if (attr->transfer)
4467                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4468         else
4469                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4470                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4471         return flow_dv_push_vlan_action_resource_register
4472                                             (dev, &res, dev_flow, error);
4473 }
4474
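     /*
      * Worked example: pushing an 802.1Q tag with PCP 3 and VID 100. The
      * TCI is (3 << 13) | 100 = 0x6064, so the 32-bit big-endian tag
      * handed to the push-VLAN action is 0x81006064: TPID 0x8100 in the
      * upper 16 bits, TCI in the lower 16. Both fields are in CPU byte
      * order here; the register helper converts to big endian.
      *
      *     struct rte_vlan_hdr vlan = {
      *             .eth_proto = RTE_ETHER_TYPE_VLAN,  // 0x8100
      *             .vlan_tci = (3 << 13) | 100,  // PCP 3, DEI 0, VID 100
      *     };
      */
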
4475 /**
4476  * Validate the modify-header actions.
4477  *
4478  * @param[in] action_flags
4479  *   Holds the actions detected until now.
4480  * @param[in] action
4481  *   Pointer to the modify action.
4482  * @param[out] error
4483  *   Pointer to error structure.
4484  *
4485  * @return
4486  *   0 on success, a negative errno value otherwise and rte_errno is set.
4487  */
4488 static int
4489 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4490                                    const struct rte_flow_action *action,
4491                                    struct rte_flow_error *error)
4492 {
4493         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4494                 return rte_flow_error_set(error, EINVAL,
4495                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4496                                           NULL, "action configuration not set");
4497         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4498                 return rte_flow_error_set(error, EINVAL,
4499                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4500                                           "can't have encap action before"
4501                                           " modify action");
4502         return 0;
4503 }
4504
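     /*
      * Example (illustrative): modify-header actions must precede any
      * encap so that they operate on the original packet headers. The
      * first sequence below passes the check above, the second is
      * rejected:
      *
      *     SET_IPV4_SRC -> VXLAN_ENCAP -> QUEUE    // OK
      *     VXLAN_ENCAP -> SET_IPV4_SRC -> QUEUE    // EINVAL: encap
      *                                             // precedes the modify
      */
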
4505 /**
4506  * Validate the modify-header MAC address actions.
4507  *
4508  * @param[in] action_flags
4509  *   Holds the actions detected until now.
4510  * @param[in] action
4511  *   Pointer to the modify action.
4512  * @param[in] item_flags
4513  *   Holds the items detected.
4514  * @param[out] error
4515  *   Pointer to error structure.
4516  *
4517  * @return
4518  *   0 on success, a negative errno value otherwise and rte_errno is set.
4519  */
4520 static int
4521 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4522                                    const struct rte_flow_action *action,
4523                                    const uint64_t item_flags,
4524                                    struct rte_flow_error *error)
4525 {
4526         int ret = 0;
4527
4528         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4529         if (!ret) {
4530                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4531                         return rte_flow_error_set(error, EINVAL,
4532                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4533                                                   NULL,
4534                                                   "no L2 item in pattern");
4535         }
4536         return ret;
4537 }
4538
4539 /**
4540  * Validate the modify-header IPv4 address actions.
4541  *
4542  * @param[in] action_flags
4543  *   Holds the actions detected until now.
4544  * @param[in] action
4545  *   Pointer to the modify action.
4546  * @param[in] item_flags
4547  *   Holds the items detected.
4548  * @param[out] error
4549  *   Pointer to error structure.
4550  *
4551  * @return
4552  *   0 on success, a negative errno value otherwise and rte_errno is set.
4553  */
4554 static int
4555 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4556                                     const struct rte_flow_action *action,
4557                                     const uint64_t item_flags,
4558                                     struct rte_flow_error *error)
4559 {
4560         int ret = 0;
4561         uint64_t layer;
4562
4563         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4564         if (!ret) {
4565                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4566                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4567                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4568                 if (!(item_flags & layer))
4569                         return rte_flow_error_set(error, EINVAL,
4570                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4571                                                   NULL,
4572                                                   "no ipv4 item in pattern");
4573         }
4574         return ret;
4575 }
4576
4577 /**
4578  * Validate the modify-header IPv6 address actions.
4579  *
4580  * @param[in] action_flags
4581  *   Holds the actions detected until now.
4582  * @param[in] action
4583  *   Pointer to the modify action.
4584  * @param[in] item_flags
4585  *   Holds the items detected.
4586  * @param[out] error
4587  *   Pointer to error structure.
4588  *
4589  * @return
4590  *   0 on success, a negative errno value otherwise and rte_errno is set.
4591  */
4592 static int
4593 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4594                                     const struct rte_flow_action *action,
4595                                     const uint64_t item_flags,
4596                                     struct rte_flow_error *error)
4597 {
4598         int ret = 0;
4599         uint64_t layer;
4600
4601         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4602         if (!ret) {
4603                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4604                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4605                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4606                 if (!(item_flags & layer))
4607                         return rte_flow_error_set(error, EINVAL,
4608                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4609                                                   NULL,
4610                                                   "no ipv6 item in pattern");
4611         }
4612         return ret;
4613 }
4614
4615 /**
4616  * Validate the modify-header TP actions.
4617  *
4618  * @param[in] action_flags
4619  *   Holds the actions detected until now.
4620  * @param[in] action
4621  *   Pointer to the modify action.
4622  * @param[in] item_flags
4623  *   Holds the items detected.
4624  * @param[out] error
4625  *   Pointer to error structure.
4626  *
4627  * @return
4628  *   0 on success, a negative errno value otherwise and rte_errno is set.
4629  */
4630 static int
4631 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4632                                   const struct rte_flow_action *action,
4633                                   const uint64_t item_flags,
4634                                   struct rte_flow_error *error)
4635 {
4636         int ret = 0;
4637         uint64_t layer;
4638
4639         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4640         if (!ret) {
4641                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4642                                  MLX5_FLOW_LAYER_INNER_L4 :
4643                                  MLX5_FLOW_LAYER_OUTER_L4;
4644                 if (!(item_flags & layer))
4645                         return rte_flow_error_set(error, EINVAL,
4646                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4647                                                   NULL, "no transport layer "
4648                                                   "in pattern");
4649         }
4650         return ret;
4651 }
4652
4653 /**
4654  * Validate the modify-header actions of increment/decrement
4655  * TCP Sequence-number.
4656  *
4657  * @param[in] action_flags
4658  *   Holds the actions detected until now.
4659  * @param[in] action
4660  *   Pointer to the modify action.
4661  * @param[in] item_flags
4662  *   Holds the items detected.
4663  * @param[out] error
4664  *   Pointer to error structure.
4665  *
4666  * @return
4667  *   0 on success, a negative errno value otherwise and rte_errno is set.
4668  */
4669 static int
4670 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4671                                        const struct rte_flow_action *action,
4672                                        const uint64_t item_flags,
4673                                        struct rte_flow_error *error)
4674 {
4675         int ret = 0;
4676         uint64_t layer;
4677
4678         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4679         if (!ret) {
4680                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4681                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4682                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4683                 if (!(item_flags & layer))
4684                         return rte_flow_error_set(error, EINVAL,
4685                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4686                                                   NULL, "no TCP item in"
4687                                                   " pattern");
4688                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4689                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4690                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4691                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4692                         return rte_flow_error_set(error, EINVAL,
4693                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4694                                                   NULL,
4695                                                   "cannot decrease and increase"
4696                                                   " TCP sequence number"
4697                                                   " at the same time");
4698         }
4699         return ret;
4700 }
4701
4702 /**
4703  * Validate the modify-header actions of increment/decrement
4704  * TCP Acknowledgment number.
4705  *
4706  * @param[in] action_flags
4707  *   Holds the actions detected until now.
4708  * @param[in] action
4709  *   Pointer to the modify action.
4710  * @param[in] item_flags
4711  *   Holds the items detected.
4712  * @param[out] error
4713  *   Pointer to error structure.
4714  *
4715  * @return
4716  *   0 on success, a negative errno value otherwise and rte_errno is set.
4717  */
4718 static int
4719 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4720                                        const struct rte_flow_action *action,
4721                                        const uint64_t item_flags,
4722                                        struct rte_flow_error *error)
4723 {
4724         int ret = 0;
4725         uint64_t layer;
4726
4727         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4728         if (!ret) {
4729                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4730                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4731                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4732                 if (!(item_flags & layer))
4733                         return rte_flow_error_set(error, EINVAL,
4734                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4735                                                   NULL, "no TCP item in"
4736                                                   " pattern");
4737                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4738                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4739                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4740                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4741                         return rte_flow_error_set(error, EINVAL,
4742                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4743                                                   NULL,
4744                                                   "cannot decrease and increase"
4745                                                   " TCP acknowledgment number"
4746                                                   " at the same time");
4747         }
4748         return ret;
4749 }
4750
4751 /**
4752  * Validate the modify-header TTL actions.
4753  *
4754  * @param[in] action_flags
4755  *   Holds the actions detected until now.
4756  * @param[in] action
4757  *   Pointer to the modify action.
4758  * @param[in] item_flags
4759  *   Holds the items detected.
4760  * @param[out] error
4761  *   Pointer to error structure.
4762  *
4763  * @return
4764  *   0 on success, a negative errno value otherwise and rte_errno is set.
4765  */
4766 static int
4767 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4768                                    const struct rte_flow_action *action,
4769                                    const uint64_t item_flags,
4770                                    struct rte_flow_error *error)
4771 {
4772         int ret = 0;
4773         uint64_t layer;
4774
4775         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4776         if (!ret) {
4777                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4778                                  MLX5_FLOW_LAYER_INNER_L3 :
4779                                  MLX5_FLOW_LAYER_OUTER_L3;
4780                 if (!(item_flags & layer))
4781                         return rte_flow_error_set(error, EINVAL,
4782                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4783                                                   NULL,
4784                                                   "no IP protocol in pattern");
4785         }
4786         return ret;
4787 }
4788
4789 /**
4790  * Validate the generic modify field actions.
4791  * @param[in] dev
4792  *   Pointer to the rte_eth_dev structure.
4793  * @param[in] action_flags
4794  *   Holds the actions detected until now.
4795  * @param[in] action
4796  *   Pointer to the modify action.
4797  * @param[in] attr
4798  *   Pointer to the flow attributes.
4799  * @param[out] error
4800  *   Pointer to error structure.
4801  *
4802  * @return
4803  *   Number of header fields to modify (0 or more) on success,
4804  *   a negative errno value otherwise and rte_errno is set.
4805  */
4806 static int
4807 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4808                                    const uint64_t action_flags,
4809                                    const struct rte_flow_action *action,
4810                                    const struct rte_flow_attr *attr,
4811                                    struct rte_flow_error *error)
4812 {
4813         int ret = 0;
4814         struct mlx5_priv *priv = dev->data->dev_private;
4815         struct mlx5_sh_config *config = &priv->sh->config;
4816         const struct rte_flow_action_modify_field *action_modify_field =
4817                 action->conf;
4818         uint32_t dst_width = mlx5_flow_item_field_width(dev,
4819                                 action_modify_field->dst.field,
4820                                 -1, attr, error);
4821         uint32_t src_width = mlx5_flow_item_field_width(dev,
4822                                 action_modify_field->src.field,
4823                                 dst_width, attr, error);
4824
4825         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4826         if (ret)
4827                 return ret;
4828
4829         if (action_modify_field->width == 0)
4830                 return rte_flow_error_set(error, EINVAL,
4831                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4832                                 "no bits are requested to be modified");
4833         else if (action_modify_field->width > dst_width ||
4834                  action_modify_field->width > src_width)
4835                 return rte_flow_error_set(error, EINVAL,
4836                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4837                                 "cannot modify more bits than"
4838                                 " the width of a field");
4839         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4840             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4841                 if ((action_modify_field->dst.offset +
4842                      action_modify_field->width > dst_width) ||
4843                     (action_modify_field->dst.offset % 32))
4844                         return rte_flow_error_set(error, EINVAL,
4845                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4846                                         "destination offset is too big"
4847                                         " or not aligned to 4 bytes");
4848                 if (action_modify_field->dst.level &&
4849                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4850                         return rte_flow_error_set(error, ENOTSUP,
4851                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4852                                         "inner header fields modification"
4853                                         " is not supported");
4854         }
4855         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4856             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4857                 if (!attr->transfer && !attr->group)
4858                         return rte_flow_error_set(error, ENOTSUP,
4859                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4860                                         "modify field action is not"
4861                                         " supported for group 0");
4862                 if ((action_modify_field->src.offset +
4863                      action_modify_field->width > src_width) ||
4864                     (action_modify_field->src.offset % 32))
4865                         return rte_flow_error_set(error, EINVAL,
4866                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4867                                         "source offset is too big"
4868                                         " or not aligned to 4 bytes");
4869                 if (action_modify_field->src.level &&
4870                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4871                         return rte_flow_error_set(error, ENOTSUP,
4872                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4873                                         "inner header fields modification"
4874                                         " is not supported");
4875         }
4876         if ((action_modify_field->dst.field ==
4877              action_modify_field->src.field) &&
4878             (action_modify_field->dst.level ==
4879              action_modify_field->src.level))
4880                 return rte_flow_error_set(error, EINVAL,
4881                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4882                                 "source and destination fields"
4883                                 " cannot be the same");
4884         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4885             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER ||
4886             action_modify_field->dst.field == RTE_FLOW_FIELD_MARK)
4887                 return rte_flow_error_set(error, EINVAL,
4888                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4889                                 "mark, immediate value or a pointer to it"
4890                                 " cannot be used as a destination");
4891         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4892             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4893                 return rte_flow_error_set(error, ENOTSUP,
4894                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4895                                 "modification of an arbitrary"
4896                                 " place in a packet is not supported");
4897         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4898             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4899                 return rte_flow_error_set(error, ENOTSUP,
4900                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4901                                 "modification of the 802.1Q Tag"
4902                                 " Identifier is not supported");
4903         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4904             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4905                 return rte_flow_error_set(error, ENOTSUP,
4906                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4907                                 "modification of the VXLAN Network"
4908                                 " Identifier is not supported");
4909         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4910             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4911                 return rte_flow_error_set(error, ENOTSUP,
4912                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4913                                 "modification of the GENEVE Network"
4914                                 " Identifier is not supported");
4915         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4916             action_modify_field->src.field == RTE_FLOW_FIELD_MARK)
4917                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4918                     !mlx5_flow_ext_mreg_supported(dev))
4919                         return rte_flow_error_set(error, ENOTSUP,
4920                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4921                                         "cannot modify mark in legacy mode"
4922                                         " or without extensive registers");
4923         if (action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4924             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4925                 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
4926                     !mlx5_flow_ext_mreg_supported(dev))
4927                         return rte_flow_error_set(error, ENOTSUP,
4928                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4929                                         "cannot modify meta without"
4930                                         " extensive registers support");
4931                 ret = flow_dv_get_metadata_reg(dev, attr, error);
4932                 if (ret < 0 || ret == REG_NON)
4933                         return rte_flow_error_set(error, ENOTSUP,
4934                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4935                                         "cannot modify meta without"
4936                                         " extensive registers available");
4937         }
4938         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4939                 return rte_flow_error_set(error, ENOTSUP,
4940                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4941                                 "add and sub operations"
4942                                 " are not supported");
4943         return (action_modify_field->width / 32) +
4944                !!(action_modify_field->width % 32);
4945 }
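
/*
 * Illustrative note (not part of the driver logic): on success the
 * validation above returns the number of 32-bit modify-header commands
 * needed to cover the requested width, i.e. width / 32 rounded up:
 *
 *     width =  32  ->  1 command
 *     width =  48  ->  2 commands
 *     width = 128  ->  4 commands
 */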
4946
4947 /**
4948  * Validate jump action.
4949  *
4950  * @param[in] action
4951  *   Pointer to the jump action.
4952  * @param[in] action_flags
4953  *   Holds the actions detected until now.
4954  * @param[in] attributes
4955  *   Pointer to the flow attributes.
4956  * @param[in] external
4957  *   Action belongs to a flow rule created by a request external to the PMD.
4958  * @param[out] error
4959  *   Pointer to error structure.
4960  *
4961  * @return
4962  *   0 on success, a negative errno value otherwise and rte_errno is set.
4963  */
4964 static int
4965 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4966                              const struct mlx5_flow_tunnel *tunnel,
4967                              const struct rte_flow_action *action,
4968                              uint64_t action_flags,
4969                              const struct rte_flow_attr *attributes,
4970                              bool external, struct rte_flow_error *error)
4971 {
4972         uint32_t target_group, table = 0;
4973         int ret = 0;
4974         struct flow_grp_info grp_info = {
4975                 .external = !!external,
4976                 .transfer = !!attributes->transfer,
4977                 .fdb_def_rule = 1,
4978                 .std_tbl_fix = 0
4979         };
4980         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4981                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4982                 return rte_flow_error_set(error, EINVAL,
4983                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4984                                           "can't have 2 fate actions in"
4985                                           " same flow");
4986         if (!action->conf)
4987                 return rte_flow_error_set(error, EINVAL,
4988                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4989                                           NULL, "action configuration not set");
4990         target_group =
4991                 ((const struct rte_flow_action_jump *)action->conf)->group;
4992         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4993                                        &grp_info, error);
4994         if (ret)
4995                 return ret;
4996         if (attributes->group == target_group &&
4997             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4998                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4999                 return rte_flow_error_set(error, EINVAL,
5000                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5001                                           "target group must be other than"
5002                                           " the current flow group");
5003         if (table == 0)
5004                 return rte_flow_error_set(error, EINVAL,
5005                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5006                                           NULL, "root table shouldn't be destination");
5007         return 0;
5008 }
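
/*
 * Usage sketch (hypothetical application code, not used by the PMD):
 * a JUMP action passes the validation above only when it targets a
 * non-root group different from the one the flow is created in and no
 * other fate action is present.
 *
 *     struct rte_flow_action_jump jump = { .group = 2 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */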
5009
5010 /**
5011  * Validate action PORT_ID / REPRESENTED_PORT.
5012  *
5013  * @param[in] dev
5014  *   Pointer to rte_eth_dev structure.
5015  * @param[in] action_flags
5016  *   Bit-fields that holds the actions detected until now.
5017  * @param[in] action
5018  *   PORT_ID / REPRESENTED_PORT action structure.
5019  * @param[in] attr
5020  *   Attributes of flow that includes this action.
5021  * @param[out] error
5022  *   Pointer to error structure.
5023  *
5024  * @return
5025  *   0 on success, a negative errno value otherwise and rte_errno is set.
5026  */
5027 static int
5028 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5029                                 uint64_t action_flags,
5030                                 const struct rte_flow_action *action,
5031                                 const struct rte_flow_attr *attr,
5032                                 struct rte_flow_error *error)
5033 {
5034         const struct rte_flow_action_port_id *port_id;
5035         const struct rte_flow_action_ethdev *ethdev;
5036         struct mlx5_priv *act_priv;
5037         struct mlx5_priv *dev_priv;
5038         uint16_t port;
5039
5040         if (!attr->transfer)
5041                 return rte_flow_error_set(error, ENOTSUP,
5042                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5043                                           NULL,
5044                                           "port action is valid in transfer"
5045                                           " mode only");
5046         if (!action || !action->conf)
5047                 return rte_flow_error_set(error, ENOTSUP,
5048                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5049                                           NULL,
5050                                           "port action parameters must be"
5051                                           " specified");
5052         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5053                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5054                 return rte_flow_error_set(error, EINVAL,
5055                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5056                                           "can have only one fate action in"
5057                                           " a flow");
5058         dev_priv = mlx5_dev_to_eswitch_info(dev);
5059         if (!dev_priv)
5060                 return rte_flow_error_set(error, rte_errno,
5061                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5062                                           NULL,
5063                                           "failed to obtain E-Switch info");
5064         switch (action->type) {
5065         case RTE_FLOW_ACTION_TYPE_PORT_ID:
5066                 port_id = action->conf;
5067                 port = port_id->original ? dev->data->port_id : port_id->id;
5068                 break;
5069         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5070                 ethdev = action->conf;
5071                 port = ethdev->port_id;
5072                 break;
5073         default:
5074                 MLX5_ASSERT(false);
5075                 return rte_flow_error_set
5076                                 (error, EINVAL,
5077                                  RTE_FLOW_ERROR_TYPE_ACTION, action,
5078                                  "unknown E-Switch action");
5079         }
5080         act_priv = mlx5_port_to_eswitch_info(port, false);
5081         if (!act_priv)
5082                 return rte_flow_error_set
5083                                 (error, rte_errno,
5084                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, action->conf,
5085                                  "failed to obtain E-Switch port id for port");
5086         if (act_priv->domain_id != dev_priv->domain_id)
5087                 return rte_flow_error_set
5088                                 (error, EINVAL,
5089                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5090                                  "port does not belong to"
5091                                  " E-Switch being configured");
5092         return 0;
5093 }
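
/*
 * Configuration sketch (hypothetical values): the two accepted action
 * types carry the target port in different conf structures, and both
 * require attr->transfer and a port in the same E-Switch domain.
 *
 *     struct rte_flow_action_port_id pid = { .original = 0, .id = 1 };
 *     struct rte_flow_action_ethdev edev = { .port_id = 1 };
 */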
5094
5095 /**
5096  * Get the maximum number of modify header actions.
5097  *
5098  * @param dev
5099  *   Pointer to rte_eth_dev structure.
5100  * @param root
5101  *   Whether action is on root table.
5102  *
5103  * @return
5104  *   Max number of modify header actions device can support.
5105  */
5106 static inline unsigned int
5107 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5108                               bool root)
5109 {
5110         /*
5111          * There's no way to directly query the max capacity from FW.
5112          * The maximal value on root table should be assumed to be supported.
5113          */
5114         if (!root)
5115                 return MLX5_MAX_MODIFY_NUM;
5116         else
5117                 return MLX5_ROOT_TBL_MODIFY_NUM;
5118 }
5119
5120 /**
5121  * Validate the meter action.
5122  *
5123  * @param[in] dev
5124  *   Pointer to rte_eth_dev structure.
5125  * @param[in] action_flags
5126  *   Bit-fields that holds the actions detected until now.
5127  * @param[in] item_flags
5128  *   Holds the items detected.
5129  * @param[in] action
5130  *   Pointer to the meter action.
5131  * @param[in] attr
5132  *   Attributes of flow that includes this action.
5133  * @param[in] port_id_item
5134  *   Pointer to item indicating port id.
5135  * @param[out] error
5136  *   Pointer to error structure.
5137  *
5138  * @return
5139  *   0 on success, a negative errno value otherwise and rte_errno is set.
5140  */
5141 static int
5142 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5143                                 uint64_t action_flags, uint64_t item_flags,
5144                                 const struct rte_flow_action *action,
5145                                 const struct rte_flow_attr *attr,
5146                                 const struct rte_flow_item *port_id_item,
5147                                 bool *def_policy,
5148                                 struct rte_flow_error *error)
5149 {
5150         struct mlx5_priv *priv = dev->data->dev_private;
5151         const struct rte_flow_action_meter *am = action->conf;
5152         struct mlx5_flow_meter_info *fm;
5153         struct mlx5_flow_meter_policy *mtr_policy;
5154         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5155
5156         if (!am)
5157                 return rte_flow_error_set(error, EINVAL,
5158                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5159                                           "meter action conf is NULL");
5160
5161         if (action_flags & MLX5_FLOW_ACTION_METER)
5162                 return rte_flow_error_set(error, ENOTSUP,
5163                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5164                                           "meter chaining not supported");
5165         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5166                 return rte_flow_error_set(error, ENOTSUP,
5167                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5168                                           "meter with jump not supported");
5169         if (!priv->mtr_en)
5170                 return rte_flow_error_set(error, ENOTSUP,
5171                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5172                                           NULL,
5173                                           "meter action not supported");
5174         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5175         if (!fm)
5176                 return rte_flow_error_set(error, EINVAL,
5177                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5178                                           "Meter not found");
5179         /* ASO meter can always be shared by different domains. */
5180         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5181             !(fm->transfer == attr->transfer ||
5182               (!fm->ingress && !attr->ingress && attr->egress) ||
5183               (!fm->egress && !attr->egress && attr->ingress)))
5184                 return rte_flow_error_set(error, EINVAL,
5185                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5186                         "Flow attributes domain is either invalid "
5187                         "or has a domain conflict with current "
5188                         "meter attributes");
5189         if (fm->def_policy) {
5190                 if (!((attr->transfer &&
5191                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5192                         (attr->egress &&
5193                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5194                         (attr->ingress &&
5195                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5196                         return rte_flow_error_set(error, EINVAL,
5197                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5198                                           "Flow attributes domain "
5199                                           "has a conflict with current "
5200                                           "meter domain attributes");
5201                 *def_policy = true;
5202         } else {
5203                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5204                                                 fm->policy_id, NULL);
5205                 if (!mtr_policy)
5206                         return rte_flow_error_set(error, EINVAL,
5207                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5208                                           "Invalid policy ID for meter");
5209                 if (!((attr->transfer && mtr_policy->transfer) ||
5210                         (attr->egress && mtr_policy->egress) ||
5211                         (attr->ingress && mtr_policy->ingress)))
5212                         return rte_flow_error_set(error, EINVAL,
5213                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5214                                           "Flow attributes domain "
5215                                           "has a conflict with current "
5216                                           "meter domain attributes");
5217                 if (attr->transfer && mtr_policy->dev) {
5218                         /*
5219                          * When the policy has a fate action of port_id,
5220                          * the flow should have the same src port as the policy.
5221                          */
5222                         struct mlx5_priv *policy_port_priv =
5223                                         mtr_policy->dev->data->dev_private;
5224                         int32_t flow_src_port = priv->representor_id;
5225
5226                         if (port_id_item) {
5227                                 const struct rte_flow_item_port_id *spec =
5228                                                         port_id_item->spec;
5229                                 struct mlx5_priv *port_priv =
5230                                         mlx5_port_to_eswitch_info(spec->id,
5231                                                                   false);
5232                                 if (!port_priv)
5233                                         return rte_flow_error_set(error,
5234                                                 rte_errno,
5235                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5236                                                 spec,
5237                                                 "Failed to get port info.");
5238                                 flow_src_port = port_priv->representor_id;
5239                         }
5240                         if (flow_src_port != policy_port_priv->representor_id)
5241                                 return rte_flow_error_set(error,
5242                                                 rte_errno,
5243                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5244                                                 NULL,
5245                                                 "Flow and meter policy "
5246                                                 "have different src port.");
5247                 } else if (mtr_policy->is_rss) {
5248                         struct mlx5_flow_meter_policy *fp;
5249                         struct mlx5_meter_policy_action_container *acg;
5250                         struct mlx5_meter_policy_action_container *acy;
5251                         const struct rte_flow_action *rss_act;
5252                         int ret;
5253
5254                         fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
5255                                                                 mtr_policy);
5256                         if (fp == NULL)
5257                                 return rte_flow_error_set(error, EINVAL,
5258                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5259                                                   "Unable to get the final "
5260                                                   "policy in the hierarchy");
5261                         acg = &fp->act_cnt[RTE_COLOR_GREEN];
5262                         acy = &fp->act_cnt[RTE_COLOR_YELLOW];
5263                         MLX5_ASSERT(acg->fate_action ==
5264                                     MLX5_FLOW_FATE_SHARED_RSS ||
5265                                     acy->fate_action ==
5266                                     MLX5_FLOW_FATE_SHARED_RSS);
5267                         if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS)
5268                                 rss_act = acg->rss;
5269                         else
5270                                 rss_act = acy->rss;
5271                         ret = mlx5_flow_validate_action_rss(rss_act,
5272                                         action_flags, dev, attr,
5273                                         item_flags, error);
5274                         if (ret)
5275                                 return ret;
5276                 }
5277                 *def_policy = false;
5278         }
5279         return 0;
5280 }
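
/*
 * Usage sketch (hypothetical application code): the meter referenced
 * by the action must exist already, e.g. created with rte_mtr_create(),
 * and its policy domains must match the flow attributes.
 *
 *     struct rte_flow_action_meter meter = { .mtr_id = 1 };
 *     struct rte_flow_action act = {
 *             .type = RTE_FLOW_ACTION_TYPE_METER,
 *             .conf = &meter,
 *     };
 */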
5281
5282 /**
5283  * Validate the age action.
5284  *
5285  * @param[in] action_flags
5286  *   Holds the actions detected until now.
5287  * @param[in] action
5288  *   Pointer to the age action.
5289  * @param[in] dev
5290  *   Pointer to the Ethernet device structure.
5291  * @param[out] error
5292  *   Pointer to error structure.
5293  *
5294  * @return
5295  *   0 on success, a negative errno value otherwise and rte_errno is set.
5296  */
5297 static int
5298 flow_dv_validate_action_age(uint64_t action_flags,
5299                             const struct rte_flow_action *action,
5300                             struct rte_eth_dev *dev,
5301                             struct rte_flow_error *error)
5302 {
5303         struct mlx5_priv *priv = dev->data->dev_private;
5304         const struct rte_flow_action_age *age = action->conf;
5305
5306         if (!priv->sh->cdev->config.devx ||
5307             (priv->sh->cmng.counter_fallback && !priv->sh->aso_age_mng))
5308                 return rte_flow_error_set(error, ENOTSUP,
5309                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5310                                           NULL,
5311                                           "age action not supported");
5312         if (!(action->conf))
5313                 return rte_flow_error_set(error, EINVAL,
5314                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5315                                           "configuration cannot be null");
5316         if (!(age->timeout))
5317                 return rte_flow_error_set(error, EINVAL,
5318                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5319                                           "invalid timeout value 0");
5320         if (action_flags & MLX5_FLOW_ACTION_AGE)
5321                 return rte_flow_error_set(error, EINVAL,
5322                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5323                                           "duplicate age actions set");
5324         return 0;
5325 }
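
/*
 * Usage sketch (hypothetical application code): a valid AGE action
 * needs a non-zero timeout (in seconds) and may carry a user context
 * that is returned with the aged-flow event.
 *
 *     struct rte_flow_action_age age = { .timeout = 10, .context = NULL };
 */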
5326
5327 /**
5328  * Validate the modify-header IPv4 DSCP actions.
5329  *
5330  * @param[in] action_flags
5331  *   Holds the actions detected until now.
5332  * @param[in] action
5333  *   Pointer to the modify action.
5334  * @param[in] item_flags
5335  *   Holds the items detected.
5336  * @param[out] error
5337  *   Pointer to error structure.
5338  *
5339  * @return
5340  *   0 on success, a negative errno value otherwise and rte_errno is set.
5341  */
5342 static int
5343 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5344                                          const struct rte_flow_action *action,
5345                                          const uint64_t item_flags,
5346                                          struct rte_flow_error *error)
5347 {
5348         int ret = 0;
5349
5350         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5351         if (!ret) {
5352                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5353                         return rte_flow_error_set(error, EINVAL,
5354                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5355                                                   NULL,
5356                                                   "no ipv4 item in pattern");
5357         }
5358         return ret;
5359 }
5360
5361 /**
5362  * Validate the modify-header IPv6 DSCP actions.
5363  *
5364  * @param[in] action_flags
5365  *   Holds the actions detected until now.
5366  * @param[in] action
5367  *   Pointer to the modify action.
5368  * @param[in] item_flags
5369  *   Holds the items detected.
5370  * @param[out] error
5371  *   Pointer to error structure.
5372  *
5373  * @return
5374  *   0 on success, a negative errno value otherwise and rte_errno is set.
5375  */
5376 static int
5377 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5378                                          const struct rte_flow_action *action,
5379                                          const uint64_t item_flags,
5380                                          struct rte_flow_error *error)
5381 {
5382         int ret = 0;
5383
5384         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5385         if (!ret) {
5386                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5387                         return rte_flow_error_set(error, EINVAL,
5388                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5389                                                   NULL,
5390                                                   "no ipv6 item in pattern");
5391         }
5392         return ret;
5393 }
5394
5395 int
5396 flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
5397                         struct mlx5_list_entry *entry, void *cb_ctx)
5398 {
5399         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5400         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5401         struct mlx5_flow_dv_modify_hdr_resource *resource =
5402                                   container_of(entry, typeof(*resource), entry);
5403         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5404
5405         key_len += ref->actions_num * sizeof(ref->actions[0]);
5406         return ref->actions_num != resource->actions_num ||
5407                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5408 }
5409
5410 static struct mlx5_indexed_pool *
5411 flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
5412 {
5413         struct mlx5_indexed_pool *ipool = __atomic_load_n
5414                                      (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
5415
5416         if (!ipool) {
5417                 struct mlx5_indexed_pool *expected = NULL;
5418                 struct mlx5_indexed_pool_config cfg =
5419                     (struct mlx5_indexed_pool_config) {
5420                        .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
5421                                                                    (index + 1) *
5422                                            sizeof(struct mlx5_modification_cmd),
5423                        .trunk_size = 64,
5424                        .grow_trunk = 3,
5425                        .grow_shift = 2,
5426                        .need_lock = 1,
5427                        .release_mem_en = !!sh->config.reclaim_mode,
5428                        .per_core_cache =
5429                                        sh->config.reclaim_mode ? 0 : (1 << 16),
5430                        .malloc = mlx5_malloc,
5431                        .free = mlx5_free,
5432                        .type = "mlx5_modify_action_resource",
5433                 };
5434
5435                 cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
5436                 ipool = mlx5_ipool_create(&cfg);
5437                 if (!ipool)
5438                         return NULL;
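                /*
                 * Publish the new ipool with a compare-and-swap: if another
                 * thread installed one concurrently, destroy ours and reuse
                 * the published one.
                 */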
5439                 if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
5440                                                  &expected, ipool, false,
5441                                                  __ATOMIC_SEQ_CST,
5442                                                  __ATOMIC_SEQ_CST)) {
5443                         mlx5_ipool_destroy(ipool);
5444                         ipool = __atomic_load_n(&sh->mdh_ipools[index],
5445                                                 __ATOMIC_SEQ_CST);
5446                 }
5447         }
5448         return ipool;
5449 }
5450
5451 struct mlx5_list_entry *
5452 flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
5453 {
5454         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5455         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5456         struct mlx5dv_dr_domain *ns;
5457         struct mlx5_flow_dv_modify_hdr_resource *entry;
5458         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5459         struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
5460                                                           ref->actions_num - 1);
5461         int ret;
5462         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5463         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5464         uint32_t idx;
5465
5466         if (unlikely(!ipool)) {
5467                 rte_flow_error_set(ctx->error, ENOMEM,
5468                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5469                                    NULL, "cannot allocate modify ipool");
5470                 return NULL;
5471         }
5472         entry = mlx5_ipool_zmalloc(ipool, &idx);
5473         if (!entry) {
5474                 rte_flow_error_set(ctx->error, ENOMEM,
5475                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5476                                    "cannot allocate resource memory");
5477                 return NULL;
5478         }
5479         rte_memcpy(&entry->ft_type,
5480                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5481                    key_len + data_len);
5482         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5483                 ns = sh->fdb_domain;
5484         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5485                 ns = sh->tx_domain;
5486         else
5487                 ns = sh->rx_domain;
5488         ret = mlx5_flow_os_create_flow_action_modify_header
5489                                         (sh->cdev->ctx, ns, entry,
5490                                          data_len, &entry->action);
5491         if (ret) {
5492                 mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
5493                 rte_flow_error_set(ctx->error, ENOMEM,
5494                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5495                                    NULL, "cannot create modification action");
5496                 return NULL;
5497         }
5498         entry->idx = idx;
5499         return &entry->entry;
5500 }
5501
5502 struct mlx5_list_entry *
5503 flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5504                         void *cb_ctx)
5505 {
5506         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5507         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5508         struct mlx5_flow_dv_modify_hdr_resource *entry;
5509         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5510         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5511         uint32_t idx;
5512
5513         entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
5514                                   &idx);
5515         if (!entry) {
5516                 rte_flow_error_set(ctx->error, ENOMEM,
5517                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5518                                    "cannot allocate resource memory");
5519                 return NULL;
5520         }
5521         memcpy(entry, oentry, sizeof(*entry) + data_len);
5522         entry->idx = idx;
5523         return &entry->entry;
5524 }
5525
5526 void
5527 flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5528 {
5529         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5530         struct mlx5_flow_dv_modify_hdr_resource *res =
5531                 container_of(entry, typeof(*res), entry);
5532
5533         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
5534 }
5535
5536 /**
5537  * Validate the sample action.
5538  *
5539  * @param[in, out] action_flags
5540  *   Holds the actions detected until now.
5541  * @param[in] action
5542  *   Pointer to the sample action.
5543  * @param[in] dev
5544  *   Pointer to the Ethernet device structure.
5545  * @param[in] attr
5546  *   Attributes of flow that includes this action.
5547  * @param[in] item_flags
5548  *   Holds the items detected.
5549  * @param[in] rss
5550  *   Pointer to the RSS action.
5551  * @param[out] sample_rss
5552  *   Pointer to the RSS action in sample action list.
5553  * @param[out] count
5554  *   Pointer to the COUNT action in sample action list.
5555  * @param[out] fdb_mirror_limit
5556  *   Pointer to the FDB mirror limitation flag.
5557  * @param[out] error
5558  *   Pointer to error structure.
5559  *
5560  * @return
5561  *   0 on success, a negative errno value otherwise and rte_errno is set.
5562  */
5563 static int
5564 flow_dv_validate_action_sample(uint64_t *action_flags,
5565                                const struct rte_flow_action *action,
5566                                struct rte_eth_dev *dev,
5567                                const struct rte_flow_attr *attr,
5568                                uint64_t item_flags,
5569                                const struct rte_flow_action_rss *rss,
5570                                const struct rte_flow_action_rss **sample_rss,
5571                                const struct rte_flow_action_count **count,
5572                                int *fdb_mirror_limit,
5573                                struct rte_flow_error *error)
5574 {
5575         struct mlx5_priv *priv = dev->data->dev_private;
5576         struct mlx5_sh_config *dev_conf = &priv->sh->config;
5577         const struct rte_flow_action_sample *sample = action->conf;
5578         const struct rte_flow_action *act;
5579         uint64_t sub_action_flags = 0;
5580         uint16_t queue_index = 0xFFFF;
5581         int actions_n = 0;
5582         int ret;
5583
5584         if (!sample)
5585                 return rte_flow_error_set(error, EINVAL,
5586                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5587                                           "configuration cannot be NULL");
5588         if (sample->ratio == 0)
5589                 return rte_flow_error_set(error, EINVAL,
5590                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5591                                           "ratio value starts from 1");
5592         if (!priv->sh->cdev->config.devx ||
5593             (sample->ratio > 0 && !priv->sampler_en))
5594                 return rte_flow_error_set(error, ENOTSUP,
5595                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5596                                           NULL,
5597                                           "sample action not supported");
5598         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5599                 return rte_flow_error_set(error, EINVAL,
5600                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5601                                           "Multiple sample actions not "
5602                                           "supported");
5603         if (*action_flags & MLX5_FLOW_ACTION_METER)
5604                 return rte_flow_error_set(error, EINVAL,
5605                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5606                                           "wrong action order, meter should "
5607                                           "be after sample action");
5608         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5609                 return rte_flow_error_set(error, EINVAL,
5610                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5611                                           "wrong action order, jump should "
5612                                           "be after sample action");
5613         if (*action_flags & MLX5_FLOW_ACTION_CT)
5614                 return rte_flow_error_set(error, EINVAL,
5615                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5616                                           "Sample after CT not supported");
5617         act = sample->actions;
5618         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5619                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5620                         return rte_flow_error_set(error, ENOTSUP,
5621                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5622                                                   act, "too many actions");
5623                 switch (act->type) {
5624                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5625                         ret = mlx5_flow_validate_action_queue(act,
5626                                                               sub_action_flags,
5627                                                               dev,
5628                                                               attr, error);
5629                         if (ret < 0)
5630                                 return ret;
5631                         queue_index = ((const struct rte_flow_action_queue *)
5632                                                         (act->conf))->index;
5633                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5634                         ++actions_n;
5635                         break;
5636                 case RTE_FLOW_ACTION_TYPE_RSS:
5637                         *sample_rss = act->conf;
5638                         ret = mlx5_flow_validate_action_rss(act,
5639                                                             sub_action_flags,
5640                                                             dev, attr,
5641                                                             item_flags,
5642                                                             error);
5643                         if (ret < 0)
5644                                 return ret;
5645                         if (rss && *sample_rss &&
5646                             ((*sample_rss)->level != rss->level ||
5647                             (*sample_rss)->types != rss->types))
5648                                 return rte_flow_error_set(error, ENOTSUP,
5649                                         RTE_FLOW_ERROR_TYPE_ACTION,
5650                                         NULL,
5651                                         "Can't use different RSS types "
5652                                         "or levels in the same flow");
5653                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5654                                 queue_index = (*sample_rss)->queue[0];
5655                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5656                         ++actions_n;
5657                         break;
5658                 case RTE_FLOW_ACTION_TYPE_MARK:
5659                         ret = flow_dv_validate_action_mark(dev, act,
5660                                                            sub_action_flags,
5661                                                            attr, error);
5662                         if (ret < 0)
5663                                 return ret;
5664                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5665                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5666                                                 MLX5_FLOW_ACTION_MARK_EXT;
5667                         else
5668                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5669                         ++actions_n;
5670                         break;
5671                 case RTE_FLOW_ACTION_TYPE_COUNT:
5672                         ret = flow_dv_validate_action_count
5673                                 (dev, false, *action_flags | sub_action_flags,
5674                                  error);
5675                         if (ret < 0)
5676                                 return ret;
5677                         *count = act->conf;
5678                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5679                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5680                         ++actions_n;
5681                         break;
5682                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5683                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5684                         ret = flow_dv_validate_action_port_id(dev,
5685                                                               sub_action_flags,
5686                                                               act,
5687                                                               attr,
5688                                                               error);
5689                         if (ret)
5690                                 return ret;
5691                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5692                         ++actions_n;
5693                         break;
5694                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5695                         ret = flow_dv_validate_action_raw_encap_decap
5696                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5697                                  &actions_n, action, item_flags, error);
5698                         if (ret < 0)
5699                                 return ret;
5700                         ++actions_n;
5701                         break;
5702                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5703                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5704                         ret = flow_dv_validate_action_l2_encap(dev,
5705                                                                sub_action_flags,
5706                                                                act, attr,
5707                                                                error);
5708                         if (ret < 0)
5709                                 return ret;
5710                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5711                         ++actions_n;
5712                         break;
5713                 default:
5714                         return rte_flow_error_set(error, ENOTSUP,
5715                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5716                                                   NULL,
5717                                                   "unsupported action in the "
5718                                                   "sample actions list");
5719                 }
5720         }
5721         if (attr->ingress && !attr->transfer) {
5722                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5723                                           MLX5_FLOW_ACTION_RSS)))
5724                         return rte_flow_error_set(error, EINVAL,
5725                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5726                                                   NULL,
5727                                                   "Ingress must have a dest "
5728                                                   "QUEUE for Sample");
5729         } else if (attr->egress && !attr->transfer) {
5730                 return rte_flow_error_set(error, ENOTSUP,
5731                                           RTE_FLOW_ERROR_TYPE_ACTION,
5732                                           NULL,
5733                                           "Sample only supports Ingress "
5734                                           "or E-Switch");
5735         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5736                 MLX5_ASSERT(attr->transfer);
5737                 if (sample->ratio > 1)
5738                         return rte_flow_error_set(error, ENOTSUP,
5739                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5740                                                   NULL,
5741                                                   "E-Switch doesn't support "
5742                                                   "any optional action "
5743                                                   "for sampling");
5744                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5745                         return rte_flow_error_set(error, ENOTSUP,
5746                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5747                                                   NULL,
5748                                                   "unsupported action QUEUE");
5749                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5750                         return rte_flow_error_set(error, ENOTSUP,
5751                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5752                                                   NULL,
5753                                                   "unsupported action RSS");
5754                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5755                         return rte_flow_error_set(error, EINVAL,
5756                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5757                                                   NULL,
5758                                                   "E-Switch must have a dest "
5759                                                   "port for mirroring");
5760                 if (!priv->sh->cdev->config.hca_attr.reg_c_preserve &&
5761                      priv->representor_id != UINT16_MAX)
5762                         *fdb_mirror_limit = 1;
5763         }
5764         /* Continue validation for Xcap actions. */
5765         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5766             (queue_index == 0xFFFF ||
5767              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5768                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5769                      MLX5_FLOW_XCAP_ACTIONS)
5770                         return rte_flow_error_set(error, ENOTSUP,
5771                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5772                                                   NULL, "encap and decap "
5773                                                   "combination isn't "
5774                                                   "supported");
5775                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5776                                                         MLX5_FLOW_ACTION_ENCAP))
5777                         return rte_flow_error_set(error, ENOTSUP,
5778                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5779                                                   NULL, "encap is not supported"
5780                                                   " for ingress traffic");
5781         }
5782         return 0;
5783 }
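
/*
 * Usage sketch (hypothetical application code): sampling duplicates
 * 1/ratio of the matched packets into the sub-action list; ratio == 1
 * mirrors every packet, the only mode the E-Switch accepts together
 * with a mandatory PORT_ID fate sub-action.
 *
 *     struct rte_flow_action sub_acts[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_action_sample sample = {
 *             .ratio = 1,
 *             .actions = sub_acts,
 *     };
 */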
5784
5785 /**
5786  * Find existing modify-header resource or create and register a new one.
5787  *
5788  * @param[in, out] dev
5789  *   Pointer to rte_eth_dev structure.
5790  * @param[in, out] resource
5791  *   Pointer to modify-header resource.
5792  * @param[in, out] dev_flow
5793  *   Pointer to the dev_flow.
5794  * @param[out] error
5795  *   Pointer to error structure.
5796  *
5797  * @return
5798  *   0 on success, otherwise a negative errno value and rte_errno is set.
5799  */
5800 static int
5801 flow_dv_modify_hdr_resource_register
5802                         (struct rte_eth_dev *dev,
5803                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5804                          struct mlx5_flow *dev_flow,
5805                          struct rte_flow_error *error)
5806 {
5807         struct mlx5_priv *priv = dev->data->dev_private;
5808         struct mlx5_dev_ctx_shared *sh = priv->sh;
5809         uint32_t key_len = sizeof(*resource) -
5810                            offsetof(typeof(*resource), ft_type) +
5811                            resource->actions_num * sizeof(resource->actions[0]);
5812         struct mlx5_list_entry *entry;
5813         struct mlx5_flow_cb_ctx ctx = {
5814                 .error = error,
5815                 .data = resource,
5816         };
5817         struct mlx5_hlist *modify_cmds;
5818         uint64_t key64;
5819
5820         modify_cmds = flow_dv_hlist_prepare(sh, &sh->modify_cmds,
5821                                 "hdr_modify",
5822                                 MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
5823                                 true, false, sh,
5824                                 flow_dv_modify_create_cb,
5825                                 flow_dv_modify_match_cb,
5826                                 flow_dv_modify_remove_cb,
5827                                 flow_dv_modify_clone_cb,
5828                                 flow_dv_modify_clone_free_cb);
5829         if (unlikely(!modify_cmds))
5830                 return -rte_errno;
5831         resource->root = !dev_flow->dv.group;
5832         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5833                                                                 resource->root))
5834                 return rte_flow_error_set(error, EOVERFLOW,
5835                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5836                                           "too many modify header items");
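        /*
         * Hash everything from ft_type up to and including the command
         * array; entries that compare equal on this key are deduplicated
         * by the hash list.
         */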
5837         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5838         entry = mlx5_hlist_register(modify_cmds, key64, &ctx);
5839         if (!entry)
5840                 return -rte_errno;
5841         resource = container_of(entry, typeof(*resource), entry);
5842         dev_flow->handle->dvh.modify_hdr = resource;
5843         return 0;
5844 }
5845
5846 /**
5847  * Get DV flow counter by index.
5848  *
5849  * @param[in] dev
5850  *   Pointer to the Ethernet device structure.
5851  * @param[in] idx
5852  *   mlx5 flow counter index in the container.
5853  * @param[out] ppool
5854  *   mlx5 flow counter pool in the container.
5855  *
5856  * @return
5857  *   Pointer to the counter, NULL otherwise.
5858  */
5859 static struct mlx5_flow_counter *
5860 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5861                            uint32_t idx,
5862                            struct mlx5_flow_counter_pool **ppool)
5863 {
5864         struct mlx5_priv *priv = dev->data->dev_private;
5865         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5866         struct mlx5_flow_counter_pool *pool;
5867
5868         /* Decrease to original index and clear shared bit. */
5869         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5870         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5871         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5872         MLX5_ASSERT(pool);
5873         if (ppool)
5874                 *ppool = pool;
5875         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5876 }
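
/*
 * Worked example (assuming MLX5_COUNTERS_PER_POOL == 512): counter
 * index 515 with no shared bit decodes, after the 1-based adjustment,
 * to raw index 514, i.e. pool 1, in-pool offset 2.
 */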
5877
5878 /**
5879  * Check the devx counter belongs to the pool.
5880  *
5881  * @param[in] pool
5882  *   Pointer to the counter pool.
5883  * @param[in] id
5884  *   The counter devx ID.
5885  *
5886  * @return
5887  *   True if counter belongs to the pool, false otherwise.
5888  */
5889 static bool
5890 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5891 {
5892         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5893                    MLX5_COUNTERS_PER_POOL;
5894
5895         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5896                 return true;
5897         return false;
5898 }
5899
5900 /**
5901  * Get a pool by devx counter ID.
5902  *
5903  * @param[in] cmng
5904  *   Pointer to the counter management.
5905  * @param[in] id
5906  *   The counter devx ID.
5907  *
5908  * @return
5909  *   The counter pool pointer if it exists, NULL otherwise.
5910  */
5911 static struct mlx5_flow_counter_pool *
5912 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5913 {
5914         uint32_t i;
5915         struct mlx5_flow_counter_pool *pool = NULL;
5916
5917         rte_spinlock_lock(&cmng->pool_update_sl);
5918         /* Check last used pool. */
5919         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5920             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5921                 pool = cmng->pools[cmng->last_pool_idx];
5922                 goto out;
5923         }
5924         /* ID out of range means no suitable pool in the container. */
5925         if (id > cmng->max_id || id < cmng->min_id)
5926                 goto out;
5927         /*
5928          * Search the container from the end: counter IDs are mostly
5929          * allocated in increasing sequence, so the last pool is usually
5930          * the needed one.
5931          */
5932         i = cmng->n_valid;
5933         while (i--) {
5934                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5935
5936                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5937                         pool = pool_tmp;
5938                         break;
5939                 }
5940         }
5941 out:
5942         rte_spinlock_unlock(&cmng->pool_update_sl);
5943         return pool;
5944 }
5945
5946 /**
5947  * Resize a counter container.
5948  *
5949  * @param[in] dev
5950  *   Pointer to the Ethernet device structure.
5951  *
5952  * @return
5953  *   0 on success, otherwise negative errno value and rte_errno is set.
5954  */
5955 static int
5956 flow_dv_container_resize(struct rte_eth_dev *dev)
5957 {
5958         struct mlx5_priv *priv = dev->data->dev_private;
5959         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5960         void *old_pools = cmng->pools;
5961         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5962         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5963         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5964
5965         if (!pools) {
5966                 rte_errno = ENOMEM;
5967                 return -ENOMEM;
5968         }
5969         if (old_pools)
5970                 memcpy(pools, old_pools, cmng->n *
5971                                        sizeof(struct mlx5_flow_counter_pool *));
5972         cmng->n = resize;
5973         cmng->pools = pools;
5974         if (old_pools)
5975                 mlx5_free(old_pools);
5976         return 0;
5977 }
5978
5979 /**
5980  * Query a devx flow counter.
5981  *
5982  * @param[in] dev
5983  *   Pointer to the Ethernet device structure.
5984  * @param[in] counter
5985  *   Index to the flow counter.
5986  * @param[out] pkts
5987  *   The statistics value of packets.
5988  * @param[out] bytes
5989  *   The statistics value of bytes.
5990  *
5991  * @return
5992  *   0 on success, otherwise a negative errno value and rte_errno is set.
5993  */
5994 static inline int
5995 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5996                      uint64_t *bytes)
5997 {
5998         struct mlx5_priv *priv = dev->data->dev_private;
5999         struct mlx5_flow_counter_pool *pool = NULL;
6000         struct mlx5_flow_counter *cnt;
6001         int offset;
6002
6003         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6004         MLX5_ASSERT(pool);
6005         if (priv->sh->cmng.counter_fallback)
6006                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
6007                                         0, pkts, bytes, 0, NULL, NULL, 0);
6008         rte_spinlock_lock(&pool->sl);
6009         if (!pool->raw) {
6010                 *pkts = 0;
6011                 *bytes = 0;
6012         } else {
6013                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
6014                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
6015                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
6016         }
6017         rte_spinlock_unlock(&pool->sl);
6018         return 0;
6019 }
6020
6021 /**
6022  * Create and initialize a new counter pool.
6023  *
6024  * @param[in] dev
6025  *   Pointer to the Ethernet device structure.
6026  * @param[out] dcs
6027  *   The devX counter handle.
6028  * @param[in] age
6029  *   Whether the pool is for counter that was allocated for aging.
6030  * @param[in/out] cont_cur
6031  *   Pointer to the container pointer, it will be update in pool resize.
6032  *
6033  * @return
6034  *   The pool container pointer on success, NULL otherwise and rte_errno is set.
6035  */
6036 static struct mlx5_flow_counter_pool *
6037 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
6038                     uint32_t age)
6039 {
6040         struct mlx5_priv *priv = dev->data->dev_private;
6041         struct mlx5_flow_counter_pool *pool;
6042         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6043         bool fallback = priv->sh->cmng.counter_fallback;
6044         uint32_t size = sizeof(*pool);
6045
6046         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
6047         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
6048         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
6049         if (!pool) {
6050                 rte_errno = ENOMEM;
6051                 return NULL;
6052         }
6053         pool->raw = NULL;
6054         pool->is_aged = !!age;
6055         pool->query_gen = 0;
6056         pool->min_dcs = dcs;
6057         rte_spinlock_init(&pool->sl);
6058         rte_spinlock_init(&pool->csl);
6059         TAILQ_INIT(&pool->counters[0]);
6060         TAILQ_INIT(&pool->counters[1]);
6061         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
6062         rte_spinlock_lock(&cmng->pool_update_sl);
6063         pool->index = cmng->n_valid;
6064         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
6065                 mlx5_free(pool);
6066                 rte_spinlock_unlock(&cmng->pool_update_sl);
6067                 return NULL;
6068         }
6069         cmng->pools[pool->index] = pool;
6070         cmng->n_valid++;
6071         if (unlikely(fallback)) {
6072                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
6073
6074                 if (base < cmng->min_id)
6075                         cmng->min_id = base;
6076                 if (base > cmng->max_id)
6077                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
6078                 cmng->last_pool_idx = pool->index;
6079         }
6080         rte_spinlock_unlock(&cmng->pool_update_sl);
6081         return pool;
6082 }
6083
6084 /**
6085  * Prepare a new counter and/or a new counter pool.
6086  *
6087  * @param[in] dev
6088  *   Pointer to the Ethernet device structure.
6089  * @param[out] cnt_free
6090  *   Where to put the pointer of a new counter.
6091  * @param[in] age
6092  *   Whether the pool is for counter that was allocated for aging.
6093  *
6094  * @return
6095  *   The counter pool pointer and @p cnt_free is set on success,
6096  *   NULL otherwise and rte_errno is set.
6097  */
6098 static struct mlx5_flow_counter_pool *
6099 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
6100                              struct mlx5_flow_counter **cnt_free,
6101                              uint32_t age)
6102 {
6103         struct mlx5_priv *priv = dev->data->dev_private;
6104         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6105         struct mlx5_flow_counter_pool *pool;
6106         struct mlx5_counters tmp_tq;
6107         struct mlx5_devx_obj *dcs = NULL;
6108         struct mlx5_flow_counter *cnt;
6109         enum mlx5_counter_type cnt_type =
6110                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6111         bool fallback = priv->sh->cmng.counter_fallback;
6112         uint32_t i;
6113
6114         if (fallback) {
6115                 /* bulk_bitmap must be 0 for single counter allocation. */
6116                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0);
6117                 if (!dcs)
6118                         return NULL;
6119                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
6120                 if (!pool) {
6121                         pool = flow_dv_pool_create(dev, dcs, age);
6122                         if (!pool) {
6123                                 mlx5_devx_cmd_destroy(dcs);
6124                                 return NULL;
6125                         }
6126                 }
6127                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
6128                 cnt = MLX5_POOL_GET_CNT(pool, i);
6129                 cnt->pool = pool;
6130                 cnt->dcs_when_free = dcs;
6131                 *cnt_free = cnt;
6132                 return pool;
6133         }
6134         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
6135         if (!dcs) {
6136                 rte_errno = ENODATA;
6137                 return NULL;
6138         }
6139         pool = flow_dv_pool_create(dev, dcs, age);
6140         if (!pool) {
6141                 mlx5_devx_cmd_destroy(dcs);
6142                 return NULL;
6143         }
6144         TAILQ_INIT(&tmp_tq);
6145         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
6146                 cnt = MLX5_POOL_GET_CNT(pool, i);
6147                 cnt->pool = pool;
6148                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
6149         }
6150         rte_spinlock_lock(&cmng->csl[cnt_type]);
6151         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
6152         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6153         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
6154         (*cnt_free)->pool = pool;
6155         return pool;
6156 }
6157
6158 /**
6159  * Allocate a flow counter.
6160  *
6161  * @param[in] dev
6162  *   Pointer to the Ethernet device structure.
6163  * @param[in] age
6164  *   Whether the counter was allocated for aging.
6165  *
6166  * @return
6167  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6168  */
6169 static uint32_t
6170 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
6171 {
6172         struct mlx5_priv *priv = dev->data->dev_private;
6173         struct mlx5_flow_counter_pool *pool = NULL;
6174         struct mlx5_flow_counter *cnt_free = NULL;
6175         bool fallback = priv->sh->cmng.counter_fallback;
6176         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6177         enum mlx5_counter_type cnt_type =
6178                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6179         uint32_t cnt_idx;
6180
6181         if (!priv->sh->cdev->config.devx) {
6182                 rte_errno = ENOTSUP;
6183                 return 0;
6184         }
6185         /* Get free counters from container. */
6186         rte_spinlock_lock(&cmng->csl[cnt_type]);
6187         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
6188         if (cnt_free)
6189                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
6190         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6191         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
6192                 goto err;
6193         pool = cnt_free->pool;
6194         if (fallback)
6195                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
6196         /* Create a DV counter action only in the first time usage. */
6197         if (!cnt_free->action) {
6198                 uint16_t offset;
6199                 struct mlx5_devx_obj *dcs;
6200                 int ret;
6201
6202                 if (!fallback) {
6203                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
6204                         dcs = pool->min_dcs;
6205                 } else {
6206                         offset = 0;
6207                         dcs = cnt_free->dcs_when_free;
6208                 }
6209                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
6210                                                             &cnt_free->action);
6211                 if (ret) {
6212                         rte_errno = errno;
6213                         goto err;
6214                 }
6215         }
6216         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
6217                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
6218         /* Update the counter reset values. */
6219         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
6220                                  &cnt_free->bytes))
6221                 goto err;
6222         if (!fallback && !priv->sh->cmng.query_thread_on)
6223                 /* Start the asynchronous batch query by the host thread. */
6224                 mlx5_set_query_alarm(priv->sh);
6225         /*
6226          * When the count action isn't shared (by ID), shared_info field is
6227          * used for indirect action API's refcnt.
6228          * When the counter action is not shared neither by ID nor by indirect
6229          * action API, shared info must be 1.
6230          */
6231         cnt_free->shared_info.refcnt = 1;
6232         return cnt_idx;
6233 err:
6234         if (cnt_free) {
6235                 cnt_free->pool = pool;
6236                 if (fallback)
6237                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
6238                 rte_spinlock_lock(&cmng->csl[cnt_type]);
6239                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
6240                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6241         }
6242         return 0;
6243 }
6244
6245 /**
6246  * Get age param from counter index.
6247  *
6248  * @param[in] dev
6249  *   Pointer to the Ethernet device structure.
6250  * @param[in] counter
6251  *   Index to the counter handler.
6252  *
6253  * @return
6254  *   The aging parameter specified for the counter index.
6255  */
6256 static struct mlx5_age_param*
6257 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6258                                 uint32_t counter)
6259 {
6260         struct mlx5_flow_counter *cnt;
6261         struct mlx5_flow_counter_pool *pool = NULL;
6262
6263         flow_dv_counter_get_by_idx(dev, counter, &pool);
6264         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6265         cnt = MLX5_POOL_GET_CNT(pool, counter);
6266         return MLX5_CNT_TO_AGE(cnt);
6267 }
6268
6269 /**
6270  * Remove a flow counter from aged counter list.
6271  *
6272  * @param[in] dev
6273  *   Pointer to the Ethernet device structure.
6274  * @param[in] counter
6275  *   Index to the counter handler.
6276  * @param[in] cnt
6277  *   Pointer to the counter handler.
6278  */
6279 static void
6280 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6281                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6282 {
6283         struct mlx5_age_info *age_info;
6284         struct mlx5_age_param *age_param;
6285         struct mlx5_priv *priv = dev->data->dev_private;
6286         uint16_t expected = AGE_CANDIDATE;
6287
6288         age_info = GET_PORT_AGE_INFO(priv);
6289         age_param = flow_dv_counter_idx_get_age(dev, counter);
6290         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6291                                          AGE_FREE, false, __ATOMIC_RELAXED,
6292                                          __ATOMIC_RELAXED)) {
6293                 /**
6294                  * We need the lock even it is age timeout,
6295                  * since counter may still in process.
6296                  */
6297                 rte_spinlock_lock(&age_info->aged_sl);
6298                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6299                 rte_spinlock_unlock(&age_info->aged_sl);
6300                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6301         }
6302 }
6303
6304 /**
6305  * Release a flow counter.
6306  *
6307  * @param[in] dev
6308  *   Pointer to the Ethernet device structure.
6309  * @param[in] counter
6310  *   Index to the counter handler.
6311  */
6312 static void
6313 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6314 {
6315         struct mlx5_priv *priv = dev->data->dev_private;
6316         struct mlx5_flow_counter_pool *pool = NULL;
6317         struct mlx5_flow_counter *cnt;
6318         enum mlx5_counter_type cnt_type;
6319
6320         if (!counter)
6321                 return;
6322         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6323         MLX5_ASSERT(pool);
6324         if (pool->is_aged) {
6325                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6326         } else {
6327                 /*
6328                  * If the counter action is shared by indirect action API,
6329                  * the atomic function reduces its references counter.
6330                  * If after the reduction the action is still referenced, the
6331                  * function returns here and does not release it.
6332                  * When the counter action is not shared by
6333                  * indirect action API, shared info is 1 before the reduction,
6334                  * so this condition is failed and function doesn't return here.
6335                  */
6336                 if (__atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6337                                        __ATOMIC_RELAXED))
6338                         return;
6339         }
6340         cnt->pool = pool;
6341         /*
6342          * Put the counter back to list to be updated in none fallback mode.
6343          * Currently, we are using two list alternately, while one is in query,
6344          * add the freed counter to the other list based on the pool query_gen
6345          * value. After query finishes, add counter the list to the global
6346          * container counter list. The list changes while query starts. In
6347          * this case, lock will not be needed as query callback and release
6348          * function both operate with the different list.
6349          */
6350         if (!priv->sh->cmng.counter_fallback) {
6351                 rte_spinlock_lock(&pool->csl);
6352                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6353                 rte_spinlock_unlock(&pool->csl);
6354         } else {
6355                 cnt->dcs_when_free = cnt->dcs_when_active;
6356                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6357                                            MLX5_COUNTER_TYPE_ORIGIN;
6358                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6359                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6360                                   cnt, next);
6361                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6362         }
6363 }
6364
6365 /**
6366  * Resize a meter id container.
6367  *
6368  * @param[in] dev
6369  *   Pointer to the Ethernet device structure.
6370  *
6371  * @return
6372  *   0 on success, otherwise negative errno value and rte_errno is set.
6373  */
6374 static int
6375 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6376 {
6377         struct mlx5_priv *priv = dev->data->dev_private;
6378         struct mlx5_aso_mtr_pools_mng *pools_mng =
6379                                 &priv->sh->mtrmng->pools_mng;
6380         void *old_pools = pools_mng->pools;
6381         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6382         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6383         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6384
6385         if (!pools) {
6386                 rte_errno = ENOMEM;
6387                 return -ENOMEM;
6388         }
6389         if (!pools_mng->n)
6390                 if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6391                         mlx5_free(pools);
6392                         return -ENOMEM;
6393                 }
6394         if (old_pools)
6395                 memcpy(pools, old_pools, pools_mng->n *
6396                                        sizeof(struct mlx5_aso_mtr_pool *));
6397         pools_mng->n = resize;
6398         pools_mng->pools = pools;
6399         if (old_pools)
6400                 mlx5_free(old_pools);
6401         return 0;
6402 }
6403
6404 /**
6405  * Prepare a new meter and/or a new meter pool.
6406  *
6407  * @param[in] dev
6408  *   Pointer to the Ethernet device structure.
6409  * @param[out] mtr_free
6410  *   Where to put the pointer of a new meter.g.
6411  *
6412  * @return
6413  *   The meter pool pointer and @mtr_free is set on success,
6414  *   NULL otherwise and rte_errno is set.
6415  */
6416 static struct mlx5_aso_mtr_pool *
6417 flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
6418 {
6419         struct mlx5_priv *priv = dev->data->dev_private;
6420         struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
6421         struct mlx5_aso_mtr_pool *pool = NULL;
6422         struct mlx5_devx_obj *dcs = NULL;
6423         uint32_t i;
6424         uint32_t log_obj_size;
6425
6426         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6427         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
6428                                                       priv->sh->cdev->pdn,
6429                                                       log_obj_size);
6430         if (!dcs) {
6431                 rte_errno = ENODATA;
6432                 return NULL;
6433         }
6434         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6435         if (!pool) {
6436                 rte_errno = ENOMEM;
6437                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6438                 return NULL;
6439         }
6440         pool->devx_obj = dcs;
6441         rte_rwlock_write_lock(&pools_mng->resize_mtrwl);
6442         pool->index = pools_mng->n_valid;
6443         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6444                 mlx5_free(pool);
6445                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6446                 rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6447                 return NULL;
6448         }
6449         pools_mng->pools[pool->index] = pool;
6450         pools_mng->n_valid++;
6451         rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6452         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6453                 pool->mtrs[i].offset = i;
6454                 LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
6455         }
6456         pool->mtrs[0].offset = 0;
6457         *mtr_free = &pool->mtrs[0];
6458         return pool;
6459 }
6460
6461 /**
6462  * Release a flow meter into pool.
6463  *
6464  * @param[in] dev
6465  *   Pointer to the Ethernet device structure.
6466  * @param[in] mtr_idx
6467  *   Index to aso flow meter.
6468  */
6469 static void
6470 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6471 {
6472         struct mlx5_priv *priv = dev->data->dev_private;
6473         struct mlx5_aso_mtr_pools_mng *pools_mng =
6474                                 &priv->sh->mtrmng->pools_mng;
6475         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6476
6477         MLX5_ASSERT(aso_mtr);
6478         rte_spinlock_lock(&pools_mng->mtrsl);
6479         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6480         aso_mtr->state = ASO_METER_FREE;
6481         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6482         rte_spinlock_unlock(&pools_mng->mtrsl);
6483 }
6484
6485 /**
6486  * Allocate a aso flow meter.
6487  *
6488  * @param[in] dev
6489  *   Pointer to the Ethernet device structure.
6490  *
6491  * @return
6492  *   Index to aso flow meter on success, 0 otherwise and rte_errno is set.
6493  */
6494 static uint32_t
6495 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6496 {
6497         struct mlx5_priv *priv = dev->data->dev_private;
6498         struct mlx5_aso_mtr *mtr_free = NULL;
6499         struct mlx5_aso_mtr_pools_mng *pools_mng =
6500                                 &priv->sh->mtrmng->pools_mng;
6501         struct mlx5_aso_mtr_pool *pool;
6502         uint32_t mtr_idx = 0;
6503
6504         if (!priv->sh->cdev->config.devx) {
6505                 rte_errno = ENOTSUP;
6506                 return 0;
6507         }
6508         /* Allocate the flow meter memory. */
6509         /* Get free meters from management. */
6510         rte_spinlock_lock(&pools_mng->mtrsl);
6511         mtr_free = LIST_FIRST(&pools_mng->meters);
6512         if (mtr_free)
6513                 LIST_REMOVE(mtr_free, next);
6514         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6515                 rte_spinlock_unlock(&pools_mng->mtrsl);
6516                 return 0;
6517         }
6518         mtr_free->state = ASO_METER_WAIT;
6519         rte_spinlock_unlock(&pools_mng->mtrsl);
6520         pool = container_of(mtr_free,
6521                         struct mlx5_aso_mtr_pool,
6522                         mtrs[mtr_free->offset]);
6523         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6524         if (!mtr_free->fm.meter_action) {
6525 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6526                 struct rte_flow_error error;
6527                 uint8_t reg_id;
6528
6529                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6530                 mtr_free->fm.meter_action =
6531                         mlx5_glue->dv_create_flow_action_aso
6532                                                 (priv->sh->rx_domain,
6533                                                  pool->devx_obj->obj,
6534                                                  mtr_free->offset,
6535                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6536                                                  reg_id - REG_C_0);
6537 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6538                 if (!mtr_free->fm.meter_action) {
6539                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6540                         return 0;
6541                 }
6542         }
6543         return mtr_idx;
6544 }
6545
6546 /**
6547  * Verify the @p attributes will be correctly understood by the NIC and store
6548  * them in the @p flow if everything is correct.
6549  *
6550  * @param[in] dev
6551  *   Pointer to dev struct.
6552  * @param[in] attributes
6553  *   Pointer to flow attributes
6554  * @param[in] external
6555  *   This flow rule is created by request external to PMD.
6556  * @param[out] error
6557  *   Pointer to error structure.
6558  *
6559  * @return
6560  *   - 0 on success and non root table.
6561  *   - 1 on success and root table.
6562  *   - a negative errno value otherwise and rte_errno is set.
6563  */
6564 static int
6565 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6566                             const struct mlx5_flow_tunnel *tunnel,
6567                             const struct rte_flow_attr *attributes,
6568                             const struct flow_grp_info *grp_info,
6569                             struct rte_flow_error *error)
6570 {
6571         struct mlx5_priv *priv = dev->data->dev_private;
6572         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6573         int ret = 0;
6574
6575 #ifndef HAVE_MLX5DV_DR
6576         RTE_SET_USED(tunnel);
6577         RTE_SET_USED(grp_info);
6578         if (attributes->group)
6579                 return rte_flow_error_set(error, ENOTSUP,
6580                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6581                                           NULL,
6582                                           "groups are not supported");
6583 #else
6584         uint32_t table = 0;
6585
6586         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6587                                        grp_info, error);
6588         if (ret)
6589                 return ret;
6590         if (!table)
6591                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6592 #endif
6593         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6594             attributes->priority > lowest_priority)
6595                 return rte_flow_error_set(error, ENOTSUP,
6596                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6597                                           NULL,
6598                                           "priority out of range");
6599         if (attributes->transfer) {
6600                 if (!priv->sh->config.dv_esw_en)
6601                         return rte_flow_error_set
6602                                 (error, ENOTSUP,
6603                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6604                                  "E-Switch dr is not supported");
6605                 if (attributes->egress)
6606                         return rte_flow_error_set
6607                                 (error, ENOTSUP,
6608                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6609                                  "egress is not supported");
6610         }
6611         if (!(attributes->egress ^ attributes->ingress))
6612                 return rte_flow_error_set(error, ENOTSUP,
6613                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6614                                           "must specify exactly one of "
6615                                           "ingress or egress");
6616         return ret;
6617 }
6618
6619 static int
6620 validate_integrity_bits(const struct rte_flow_item_integrity *mask,
6621                         int64_t pattern_flags, uint64_t l3_flags,
6622                         uint64_t l4_flags, uint64_t ip4_flag,
6623                         struct rte_flow_error *error)
6624 {
6625         if (mask->l3_ok && !(pattern_flags & l3_flags))
6626                 return rte_flow_error_set(error, EINVAL,
6627                                           RTE_FLOW_ERROR_TYPE_ITEM,
6628                                           NULL, "missing L3 protocol");
6629
6630         if (mask->ipv4_csum_ok && !(pattern_flags & ip4_flag))
6631                 return rte_flow_error_set(error, EINVAL,
6632                                           RTE_FLOW_ERROR_TYPE_ITEM,
6633                                           NULL, "missing IPv4 protocol");
6634
6635         if ((mask->l4_ok || mask->l4_csum_ok) && !(pattern_flags & l4_flags))
6636                 return rte_flow_error_set(error, EINVAL,
6637                                           RTE_FLOW_ERROR_TYPE_ITEM,
6638                                           NULL, "missing L4 protocol");
6639
6640         return 0;
6641 }
6642
6643 static int
6644 flow_dv_validate_item_integrity_post(const struct
6645                                      rte_flow_item *integrity_items[2],
6646                                      int64_t pattern_flags,
6647                                      struct rte_flow_error *error)
6648 {
6649         const struct rte_flow_item_integrity *mask;
6650         int ret;
6651
6652         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
6653                 mask = (typeof(mask))integrity_items[0]->mask;
6654                 ret = validate_integrity_bits(mask, pattern_flags,
6655                                               MLX5_FLOW_LAYER_OUTER_L3,
6656                                               MLX5_FLOW_LAYER_OUTER_L4,
6657                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4,
6658                                               error);
6659                 if (ret)
6660                         return ret;
6661         }
6662         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
6663                 mask = (typeof(mask))integrity_items[1]->mask;
6664                 ret = validate_integrity_bits(mask, pattern_flags,
6665                                               MLX5_FLOW_LAYER_INNER_L3,
6666                                               MLX5_FLOW_LAYER_INNER_L4,
6667                                               MLX5_FLOW_LAYER_INNER_L3_IPV4,
6668                                               error);
6669                 if (ret)
6670                         return ret;
6671         }
6672         return 0;
6673 }
6674
6675 static int
6676 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6677                                 const struct rte_flow_item *integrity_item,
6678                                 uint64_t pattern_flags, uint64_t *last_item,
6679                                 const struct rte_flow_item *integrity_items[2],
6680                                 struct rte_flow_error *error)
6681 {
6682         struct mlx5_priv *priv = dev->data->dev_private;
6683         const struct rte_flow_item_integrity *mask = (typeof(mask))
6684                                                      integrity_item->mask;
6685         const struct rte_flow_item_integrity *spec = (typeof(spec))
6686                                                      integrity_item->spec;
6687
6688         if (!priv->sh->cdev->config.hca_attr.pkt_integrity_match)
6689                 return rte_flow_error_set(error, ENOTSUP,
6690                                           RTE_FLOW_ERROR_TYPE_ITEM,
6691                                           integrity_item,
6692                                           "packet integrity integrity_item not supported");
6693         if (!spec)
6694                 return rte_flow_error_set(error, ENOTSUP,
6695                                           RTE_FLOW_ERROR_TYPE_ITEM,
6696                                           integrity_item,
6697                                           "no spec for integrity item");
6698         if (!mask)
6699                 mask = &rte_flow_item_integrity_mask;
6700         if (!mlx5_validate_integrity_item(mask))
6701                 return rte_flow_error_set(error, ENOTSUP,
6702                                           RTE_FLOW_ERROR_TYPE_ITEM,
6703                                           integrity_item,
6704                                           "unsupported integrity filter");
6705         if (spec->level > 1) {
6706                 if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY)
6707                         return rte_flow_error_set
6708                                 (error, ENOTSUP,
6709                                  RTE_FLOW_ERROR_TYPE_ITEM,
6710                                  NULL, "multiple inner integrity items not supported");
6711                 integrity_items[1] = integrity_item;
6712                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
6713         } else {
6714                 if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY)
6715                         return rte_flow_error_set
6716                                 (error, ENOTSUP,
6717                                  RTE_FLOW_ERROR_TYPE_ITEM,
6718                                  NULL, "multiple outer integrity items not supported");
6719                 integrity_items[0] = integrity_item;
6720                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
6721         }
6722         return 0;
6723 }
6724
6725 static int
6726 flow_dv_validate_item_flex(struct rte_eth_dev *dev,
6727                            const struct rte_flow_item *item,
6728                            uint64_t item_flags,
6729                            uint64_t *last_item,
6730                            bool is_inner,
6731                            struct rte_flow_error *error)
6732 {
6733         const struct rte_flow_item_flex *flow_spec = item->spec;
6734         const struct rte_flow_item_flex *flow_mask = item->mask;
6735         struct mlx5_flex_item *flex;
6736
6737         if (!flow_spec)
6738                 return rte_flow_error_set(error, EINVAL,
6739                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6740                                           "flex flow item spec cannot be NULL");
6741         if (!flow_mask)
6742                 return rte_flow_error_set(error, EINVAL,
6743                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6744                                           "flex flow item mask cannot be NULL");
6745         if (item->last)
6746                 return rte_flow_error_set(error, ENOTSUP,
6747                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6748                                           "flex flow item last not supported");
6749         if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
6750                 return rte_flow_error_set(error, EINVAL,
6751                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6752                                           "invalid flex flow item handle");
6753         flex = (struct mlx5_flex_item *)flow_spec->handle;
6754         switch (flex->tunnel_mode) {
6755         case FLEX_TUNNEL_MODE_SINGLE:
6756                 if (item_flags &
6757                     (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
6758                         rte_flow_error_set(error, EINVAL,
6759                                            RTE_FLOW_ERROR_TYPE_ITEM,
6760                                            NULL, "multiple flex items not supported");
6761                 break;
6762         case FLEX_TUNNEL_MODE_OUTER:
6763                 if (is_inner)
6764                         rte_flow_error_set(error, EINVAL,
6765                                            RTE_FLOW_ERROR_TYPE_ITEM,
6766                                            NULL, "inner flex item was not configured");
6767                 if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
6768                         rte_flow_error_set(error, ENOTSUP,
6769                                            RTE_FLOW_ERROR_TYPE_ITEM,
6770                                            NULL, "multiple flex items not supported");
6771                 break;
6772         case FLEX_TUNNEL_MODE_INNER:
6773                 if (!is_inner)
6774                         rte_flow_error_set(error, EINVAL,
6775                                            RTE_FLOW_ERROR_TYPE_ITEM,
6776                                            NULL, "outer flex item was not configured");
6777                 if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
6778                         rte_flow_error_set(error, EINVAL,
6779                                            RTE_FLOW_ERROR_TYPE_ITEM,
6780                                            NULL, "multiple flex items not supported");
6781                 break;
6782         case FLEX_TUNNEL_MODE_MULTI:
6783                 if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
6784                     (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX))) {
6785                         rte_flow_error_set(error, EINVAL,
6786                                            RTE_FLOW_ERROR_TYPE_ITEM,
6787                                            NULL, "multiple flex items not supported");
6788                 }
6789                 break;
6790         case FLEX_TUNNEL_MODE_TUNNEL:
6791                 if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
6792                         rte_flow_error_set(error, EINVAL,
6793                                            RTE_FLOW_ERROR_TYPE_ITEM,
6794                                            NULL, "multiple flex tunnel items not supported");
6795                 break;
6796         default:
6797                 rte_flow_error_set(error, EINVAL,
6798                                    RTE_FLOW_ERROR_TYPE_ITEM,
6799                                    NULL, "invalid flex item configuration");
6800         }
6801         *last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
6802                      MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
6803                      MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
6804         return 0;
6805 }
6806
6807 /**
6808  * Internal validation function. For validating both actions and items.
6809  *
6810  * @param[in] dev
6811  *   Pointer to the rte_eth_dev structure.
6812  * @param[in] attr
6813  *   Pointer to the flow attributes.
6814  * @param[in] items
6815  *   Pointer to the list of items.
6816  * @param[in] actions
6817  *   Pointer to the list of actions.
6818  * @param[in] external
6819  *   This flow rule is created by request external to PMD.
6820  * @param[in] hairpin
6821  *   Number of hairpin TX actions, 0 means classic flow.
6822  * @param[out] error
6823  *   Pointer to the error structure.
6824  *
6825  * @return
6826  *   0 on success, a negative errno value otherwise and rte_errno is set.
6827  */
6828 static int
6829 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6830                  const struct rte_flow_item items[],
6831                  const struct rte_flow_action actions[],
6832                  bool external, int hairpin, struct rte_flow_error *error)
6833 {
6834         int ret;
6835         uint64_t action_flags = 0;
6836         uint64_t item_flags = 0;
6837         uint64_t last_item = 0;
6838         uint8_t next_protocol = 0xff;
6839         uint16_t ether_type = 0;
6840         int actions_n = 0;
6841         uint8_t item_ipv6_proto = 0;
6842         int fdb_mirror_limit = 0;
6843         int modify_after_mirror = 0;
6844         const struct rte_flow_item *geneve_item = NULL;
6845         const struct rte_flow_item *gre_item = NULL;
6846         const struct rte_flow_item *gtp_item = NULL;
6847         const struct rte_flow_action_raw_decap *decap;
6848         const struct rte_flow_action_raw_encap *encap;
6849         const struct rte_flow_action_rss *rss = NULL;
6850         const struct rte_flow_action_rss *sample_rss = NULL;
6851         const struct rte_flow_action_count *sample_count = NULL;
6852         const struct rte_flow_item_tcp nic_tcp_mask = {
6853                 .hdr = {
6854                         .tcp_flags = 0xFF,
6855                         .src_port = RTE_BE16(UINT16_MAX),
6856                         .dst_port = RTE_BE16(UINT16_MAX),
6857                 }
6858         };
6859         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6860                 .hdr = {
6861                         .src_addr =
6862                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6863                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6864                         .dst_addr =
6865                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6866                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6867                         .vtc_flow = RTE_BE32(0xffffffff),
6868                         .proto = 0xff,
6869                         .hop_limits = 0xff,
6870                 },
6871                 .has_frag_ext = 1,
6872         };
6873         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6874                 .hdr = {
6875                         .common = {
6876                                 .u32 =
6877                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6878                                         .type = 0xFF,
6879                                         }).u32),
6880                         },
6881                         .dummy[0] = 0xffffffff,
6882                 },
6883         };
6884         struct mlx5_priv *priv = dev->data->dev_private;
6885         struct mlx5_sh_config *dev_conf = &priv->sh->config;
6886         uint16_t queue_index = 0xFFFF;
6887         const struct rte_flow_item_vlan *vlan_m = NULL;
6888         uint32_t rw_act_num = 0;
6889         uint64_t is_root;
6890         const struct mlx5_flow_tunnel *tunnel;
6891         enum mlx5_tof_rule_type tof_rule_type;
6892         struct flow_grp_info grp_info = {
6893                 .external = !!external,
6894                 .transfer = !!attr->transfer,
6895                 .fdb_def_rule = !!priv->fdb_def_rule,
6896                 .std_tbl_fix = true,
6897         };
6898         const struct rte_eth_hairpin_conf *conf;
6899         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
6900         const struct rte_flow_item *port_id_item = NULL;
6901         bool def_policy = false;
6902         uint16_t udp_dport = 0;
6903
6904         if (items == NULL)
6905                 return -1;
6906         tunnel = is_tunnel_offload_active(dev) ?
6907                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6908         if (tunnel) {
6909                 if (!dev_conf->dv_flow_en)
6910                         return rte_flow_error_set
6911                                 (error, ENOTSUP,
6912                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6913                                  NULL, "tunnel offload requires DV flow interface");
6914                 if (priv->representor)
6915                         return rte_flow_error_set
6916                                 (error, ENOTSUP,
6917                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6918                                  NULL, "decap not supported for VF representor");
6919                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6920                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6921                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6922                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6923                                         MLX5_FLOW_ACTION_DECAP;
6924                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6925                                         (dev, attr, tunnel, tof_rule_type);
6926         }
6927         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6928         if (ret < 0)
6929                 return ret;
6930         is_root = (uint64_t)ret;
6931         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6932                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6933                 int type = items->type;
6934
6935                 if (!mlx5_flow_os_item_supported(type))
6936                         return rte_flow_error_set(error, ENOTSUP,
6937                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6938                                                   NULL, "item not supported");
6939                 switch (type) {
6940                 case RTE_FLOW_ITEM_TYPE_VOID:
6941                         break;
6942                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6943                         ret = flow_dv_validate_item_port_id
6944                                         (dev, items, attr, item_flags, error);
6945                         if (ret < 0)
6946                                 return ret;
6947                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6948                         port_id_item = items;
6949                         break;
6950                 case RTE_FLOW_ITEM_TYPE_ETH:
6951                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6952                                                           true, error);
6953                         if (ret < 0)
6954                                 return ret;
6955                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6956                                              MLX5_FLOW_LAYER_OUTER_L2;
6957                         if (items->mask != NULL && items->spec != NULL) {
6958                                 ether_type =
6959                                         ((const struct rte_flow_item_eth *)
6960                                          items->spec)->type;
6961                                 ether_type &=
6962                                         ((const struct rte_flow_item_eth *)
6963                                          items->mask)->type;
6964                                 ether_type = rte_be_to_cpu_16(ether_type);
6965                         } else {
6966                                 ether_type = 0;
6967                         }
6968                         break;
6969                 case RTE_FLOW_ITEM_TYPE_VLAN:
6970                         ret = flow_dv_validate_item_vlan(items, item_flags,
6971                                                          dev, error);
6972                         if (ret < 0)
6973                                 return ret;
6974                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6975                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6976                         if (items->mask != NULL && items->spec != NULL) {
6977                                 ether_type =
6978                                         ((const struct rte_flow_item_vlan *)
6979                                          items->spec)->inner_type;
6980                                 ether_type &=
6981                                         ((const struct rte_flow_item_vlan *)
6982                                          items->mask)->inner_type;
6983                                 ether_type = rte_be_to_cpu_16(ether_type);
6984                         } else {
6985                                 ether_type = 0;
6986                         }
6987                         /* Store outer VLAN mask for of_push_vlan action. */
6988                         if (!tunnel)
6989                                 vlan_m = items->mask;
6990                         break;
6991                 case RTE_FLOW_ITEM_TYPE_IPV4:
6992                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6993                                                   &item_flags, &tunnel);
6994                         ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
6995                                                          last_item, ether_type,
6996                                                          error);
6997                         if (ret < 0)
6998                                 return ret;
6999                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
7000                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
7001                         if (items->mask != NULL &&
7002                             ((const struct rte_flow_item_ipv4 *)
7003                              items->mask)->hdr.next_proto_id) {
7004                                 next_protocol =
7005                                         ((const struct rte_flow_item_ipv4 *)
7006                                          (items->spec))->hdr.next_proto_id;
7007                                 next_protocol &=
7008                                         ((const struct rte_flow_item_ipv4 *)
7009                                          (items->mask))->hdr.next_proto_id;
7010                         } else {
7011                                 /* Reset for inner layer. */
7012                                 next_protocol = 0xff;
7013                         }
7014                         break;
7015                 case RTE_FLOW_ITEM_TYPE_IPV6:
7016                         mlx5_flow_tunnel_ip_check(items, next_protocol,
7017                                                   &item_flags, &tunnel);
7018                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
7019                                                            last_item,
7020                                                            ether_type,
7021                                                            &nic_ipv6_mask,
7022                                                            error);
7023                         if (ret < 0)
7024                                 return ret;
7025                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
7026                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
7027                         if (items->mask != NULL &&
7028                             ((const struct rte_flow_item_ipv6 *)
7029                              items->mask)->hdr.proto) {
7030                                 item_ipv6_proto =
7031                                         ((const struct rte_flow_item_ipv6 *)
7032                                          items->spec)->hdr.proto;
7033                                 next_protocol =
7034                                         ((const struct rte_flow_item_ipv6 *)
7035                                          items->spec)->hdr.proto;
7036                                 next_protocol &=
7037                                         ((const struct rte_flow_item_ipv6 *)
7038                                          items->mask)->hdr.proto;
7039                         } else {
7040                                 /* Reset for inner layer. */
7041                                 next_protocol = 0xff;
7042                         }
7043                         break;
7044                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
7045                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
7046                                                                   item_flags,
7047                                                                   error);
7048                         if (ret < 0)
7049                                 return ret;
7050                         last_item = tunnel ?
7051                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
7052                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
7053                         if (items->mask != NULL &&
7054                             ((const struct rte_flow_item_ipv6_frag_ext *)
7055                              items->mask)->hdr.next_header) {
7056                                 next_protocol =
7057                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7058                                  items->spec)->hdr.next_header;
7059                                 next_protocol &=
7060                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7061                                  items->mask)->hdr.next_header;
7062                         } else {
7063                                 /* Reset for inner layer. */
7064                                 next_protocol = 0xff;
7065                         }
7066                         break;
7067                 case RTE_FLOW_ITEM_TYPE_TCP:
7068                         ret = mlx5_flow_validate_item_tcp
7069                                                 (items, item_flags,
7070                                                  next_protocol,
7071                                                  &nic_tcp_mask,
7072                                                  error);
7073                         if (ret < 0)
7074                                 return ret;
7075                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7076                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
7077                         break;
7078                 case RTE_FLOW_ITEM_TYPE_UDP:
7079                         ret = mlx5_flow_validate_item_udp(items, item_flags,
7080                                                           next_protocol,
7081                                                           error);
7082                         const struct rte_flow_item_udp *spec = items->spec;
7083                         const struct rte_flow_item_udp *mask = items->mask;
7084                         if (!mask)
7085                                 mask = &rte_flow_item_udp_mask;
7086                         if (spec != NULL)
7087                                 udp_dport = rte_be_to_cpu_16
7088                                                 (spec->hdr.dst_port &
7089                                                  mask->hdr.dst_port);
7090                         if (ret < 0)
7091                                 return ret;
7092                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7093                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
7094                         break;
7095                 case RTE_FLOW_ITEM_TYPE_GRE:
7096                         ret = mlx5_flow_validate_item_gre(items, item_flags,
7097                                                           next_protocol, error);
7098                         if (ret < 0)
7099                                 return ret;
7100                         gre_item = items;
7101                         last_item = MLX5_FLOW_LAYER_GRE;
7102                         break;
7103                 case RTE_FLOW_ITEM_TYPE_NVGRE:
7104                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
7105                                                             next_protocol,
7106                                                             error);
7107                         if (ret < 0)
7108                                 return ret;
7109                         last_item = MLX5_FLOW_LAYER_NVGRE;
7110                         break;
7111                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7112                         ret = mlx5_flow_validate_item_gre_key
7113                                 (items, item_flags, gre_item, error);
7114                         if (ret < 0)
7115                                 return ret;
7116                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
7117                         break;
7118                 case RTE_FLOW_ITEM_TYPE_VXLAN:
7119                         ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
7120                                                             items, item_flags,
7121                                                             attr, error);
7122                         if (ret < 0)
7123                                 return ret;
7124                         last_item = MLX5_FLOW_LAYER_VXLAN;
7125                         break;
7126                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7127                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
7128                                                                 item_flags, dev,
7129                                                                 error);
7130                         if (ret < 0)
7131                                 return ret;
7132                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7133                         break;
7134                 case RTE_FLOW_ITEM_TYPE_GENEVE:
7135                         ret = mlx5_flow_validate_item_geneve(items,
7136                                                              item_flags, dev,
7137                                                              error);
7138                         if (ret < 0)
7139                                 return ret;
7140                         geneve_item = items;
7141                         last_item = MLX5_FLOW_LAYER_GENEVE;
7142                         break;
7143                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
7144                         ret = mlx5_flow_validate_item_geneve_opt(items,
7145                                                                  last_item,
7146                                                                  geneve_item,
7147                                                                  dev,
7148                                                                  error);
7149                         if (ret < 0)
7150                                 return ret;
7151                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
7152                         break;
7153                 case RTE_FLOW_ITEM_TYPE_MPLS:
7154                         ret = mlx5_flow_validate_item_mpls(dev, items,
7155                                                            item_flags,
7156                                                            last_item, error);
7157                         if (ret < 0)
7158                                 return ret;
7159                         last_item = MLX5_FLOW_LAYER_MPLS;
7160                         break;
7161
7162                 case RTE_FLOW_ITEM_TYPE_MARK:
7163                         ret = flow_dv_validate_item_mark(dev, items, attr,
7164                                                          error);
7165                         if (ret < 0)
7166                                 return ret;
7167                         last_item = MLX5_FLOW_ITEM_MARK;
7168                         break;
7169                 case RTE_FLOW_ITEM_TYPE_META:
7170                         ret = flow_dv_validate_item_meta(dev, items, attr,
7171                                                          error);
7172                         if (ret < 0)
7173                                 return ret;
7174                         last_item = MLX5_FLOW_ITEM_METADATA;
7175                         break;
7176                 case RTE_FLOW_ITEM_TYPE_ICMP:
7177                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
7178                                                            next_protocol,
7179                                                            error);
7180                         if (ret < 0)
7181                                 return ret;
7182                         last_item = MLX5_FLOW_LAYER_ICMP;
7183                         break;
7184                 case RTE_FLOW_ITEM_TYPE_ICMP6:
7185                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
7186                                                             next_protocol,
7187                                                             error);
7188                         if (ret < 0)
7189                                 return ret;
7190                         item_ipv6_proto = IPPROTO_ICMPV6;
7191                         last_item = MLX5_FLOW_LAYER_ICMP6;
7192                         break;
7193                 case RTE_FLOW_ITEM_TYPE_TAG:
7194                         ret = flow_dv_validate_item_tag(dev, items,
7195                                                         attr, error);
7196                         if (ret < 0)
7197                                 return ret;
7198                         last_item = MLX5_FLOW_ITEM_TAG;
7199                         break;
7200                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7201                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7202                         break;
7203                 case RTE_FLOW_ITEM_TYPE_GTP:
7204                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7205                                                         error);
7206                         if (ret < 0)
7207                                 return ret;
7208                         gtp_item = items;
7209                         last_item = MLX5_FLOW_LAYER_GTP;
7210                         break;
7211                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7212                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
7213                                                             gtp_item, attr,
7214                                                             error);
7215                         if (ret < 0)
7216                                 return ret;
7217                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
7218                         break;
7219                 case RTE_FLOW_ITEM_TYPE_ECPRI:
7220                         /* Capacity will be checked in the translate stage. */
7221                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7222                                                             last_item,
7223                                                             ether_type,
7224                                                             &nic_ecpri_mask,
7225                                                             error);
7226                         if (ret < 0)
7227                                 return ret;
7228                         last_item = MLX5_FLOW_LAYER_ECPRI;
7229                         break;
7230                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7231                         ret = flow_dv_validate_item_integrity(dev, items,
7232                                                               item_flags,
7233                                                               &last_item,
7234                                                               integrity_items,
7235                                                               error);
7236                         if (ret < 0)
7237                                 return ret;
7238                         break;
7239                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7240                         ret = flow_dv_validate_item_aso_ct(dev, items,
7241                                                            &item_flags, error);
7242                         if (ret < 0)
7243                                 return ret;
7244                         break;
7245                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7246                         /* Tunnel offload item was processed before;
7247                          * list it here as a supported type.
7248                          */
7249                         break;
7250                 case RTE_FLOW_ITEM_TYPE_FLEX:
7251                         ret = flow_dv_validate_item_flex(dev, items, item_flags,
7252                                                          &last_item,
7253                                                          tunnel != 0, error);
7254                         if (ret < 0)
7255                                 return ret;
7256                         break;
7257                 default:
7258                         return rte_flow_error_set(error, ENOTSUP,
7259                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7260                                                   NULL, "item not supported");
7261                 }
7262                 item_flags |= last_item;
7263         }
7264         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
7265                 ret = flow_dv_validate_item_integrity_post(integrity_items,
7266                                                            item_flags, error);
7267                 if (ret)
7268                         return ret;
7269         }
7270         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7271                 int type = actions->type;
7272                 bool shared_count = false;
7273
7274                 if (!mlx5_flow_os_action_supported(type))
7275                         return rte_flow_error_set(error, ENOTSUP,
7276                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7277                                                   actions,
7278                                                   "action not supported");
7279                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7280                         return rte_flow_error_set(error, ENOTSUP,
7281                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7282                                                   actions, "too many actions");
7283                 if (action_flags &
7284                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7285                         return rte_flow_error_set(error, ENOTSUP,
7286                                 RTE_FLOW_ERROR_TYPE_ACTION,
7287                                 NULL, "meter action with policy "
7288                                 "must be the last action");
7289                 switch (type) {
7290                 case RTE_FLOW_ACTION_TYPE_VOID:
7291                         break;
7292                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7293                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
7294                         ret = flow_dv_validate_action_port_id(dev,
7295                                                               action_flags,
7296                                                               actions,
7297                                                               attr,
7298                                                               error);
7299                         if (ret)
7300                                 return ret;
7301                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7302                         ++actions_n;
7303                         break;
7304                 case RTE_FLOW_ACTION_TYPE_FLAG:
7305                         ret = flow_dv_validate_action_flag(dev, action_flags,
7306                                                            attr, error);
7307                         if (ret < 0)
7308                                 return ret;
7309                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7310                                 /* Count all modify-header actions as one. */
7311                                 if (!(action_flags &
7312                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7313                                         ++actions_n;
7314                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7315                                                 MLX5_FLOW_ACTION_MARK_EXT;
7316                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7317                                         modify_after_mirror = 1;
7319                         } else {
7320                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7321                                 ++actions_n;
7322                         }
7323                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7324                         break;
7325                 case RTE_FLOW_ACTION_TYPE_MARK:
7326                         ret = flow_dv_validate_action_mark(dev, actions,
7327                                                            action_flags,
7328                                                            attr, error);
7329                         if (ret < 0)
7330                                 return ret;
7331                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7332                                 /* Count all modify-header actions as one. */
7333                                 if (!(action_flags &
7334                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7335                                         ++actions_n;
7336                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7337                                                 MLX5_FLOW_ACTION_MARK_EXT;
7338                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7339                                         modify_after_mirror = 1;
7340                         } else {
7341                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7342                                 ++actions_n;
7343                         }
7344                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7345                         break;
7346                 case RTE_FLOW_ACTION_TYPE_SET_META:
7347                         ret = flow_dv_validate_action_set_meta(dev, actions,
7348                                                                action_flags,
7349                                                                attr, error);
7350                         if (ret < 0)
7351                                 return ret;
7352                         /* Count all modify-header actions as one action. */
7353                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7354                                 ++actions_n;
7355                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7356                                 modify_after_mirror = 1;
7357                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7358                         rw_act_num += MLX5_ACT_NUM_SET_META;
7359                         break;
7360                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7361                         ret = flow_dv_validate_action_set_tag(dev, actions,
7362                                                               action_flags,
7363                                                               attr, error);
7364                         if (ret < 0)
7365                                 return ret;
7366                         /* Count all modify-header actions as one action. */
7367                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7368                                 ++actions_n;
7369                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7370                                 modify_after_mirror = 1;
7371                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7372                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7373                         break;
7374                 case RTE_FLOW_ACTION_TYPE_DROP:
7375                         ret = mlx5_flow_validate_action_drop(action_flags,
7376                                                              attr, error);
7377                         if (ret < 0)
7378                                 return ret;
7379                         action_flags |= MLX5_FLOW_ACTION_DROP;
7380                         ++actions_n;
7381                         break;
7382                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7383                         ret = mlx5_flow_validate_action_queue(actions,
7384                                                               action_flags, dev,
7385                                                               attr, error);
7386                         if (ret < 0)
7387                                 return ret;
7388                         queue_index = ((const struct rte_flow_action_queue *)
7389                                                         (actions->conf))->index;
7390                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7391                         ++actions_n;
7392                         break;
7393                 case RTE_FLOW_ACTION_TYPE_RSS:
7394                         rss = actions->conf;
7395                         ret = mlx5_flow_validate_action_rss(actions,
7396                                                             action_flags, dev,
7397                                                             attr, item_flags,
7398                                                             error);
7399                         if (ret < 0)
7400                                 return ret;
7401                         if (rss && sample_rss &&
7402                             (sample_rss->level != rss->level ||
7403                             sample_rss->types != rss->types))
7404                                 return rte_flow_error_set(error, ENOTSUP,
7405                                         RTE_FLOW_ERROR_TYPE_ACTION,
7406                                         NULL,
7407                                         "Cannot use different RSS types "
7408                                         "or levels in the same flow");
7409                         if (rss != NULL && rss->queue_num)
7410                                 queue_index = rss->queue[0];
7411                         action_flags |= MLX5_FLOW_ACTION_RSS;
7412                         ++actions_n;
7413                         break;
7414                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7415                         ret = mlx5_flow_validate_action_default_miss
7416                                                                 (action_flags,
7417                                                                  attr, error);
7418                         if (ret < 0)
7419                                 return ret;
7420                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7421                         ++actions_n;
7422                         break;
7423                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7424                         shared_count = true;
7425                         /* fall-through. */
7426                 case RTE_FLOW_ACTION_TYPE_COUNT:
7427                         ret = flow_dv_validate_action_count(dev, shared_count,
7428                                                             action_flags,
7429                                                             error);
7430                         if (ret < 0)
7431                                 return ret;
7432                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7433                         ++actions_n;
7434                         break;
7435                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7436                         if (flow_dv_validate_action_pop_vlan(dev,
7437                                                              action_flags,
7438                                                              actions,
7439                                                              item_flags, attr,
7440                                                              error))
7441                                 return -rte_errno;
7442                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7443                                 modify_after_mirror = 1;
7444                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7445                         ++actions_n;
7446                         break;
7447                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7448                         ret = flow_dv_validate_action_push_vlan(dev,
7449                                                                 action_flags,
7450                                                                 vlan_m,
7451                                                                 actions, attr,
7452                                                                 error);
7453                         if (ret < 0)
7454                                 return ret;
7455                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7456                                 modify_after_mirror = 1;
7457                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7458                         ++actions_n;
7459                         break;
7460                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7461                         ret = flow_dv_validate_action_set_vlan_pcp
7462                                                 (action_flags, actions, error);
7463                         if (ret < 0)
7464                                 return ret;
7465                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7466                                 modify_after_mirror = 1;
7467                         /* Count PCP with push_vlan command. */
7468                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7469                         break;
7470                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7471                         ret = flow_dv_validate_action_set_vlan_vid
7472                                                 (item_flags, action_flags,
7473                                                  actions, error);
7474                         if (ret < 0)
7475                                 return ret;
7476                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7477                                 modify_after_mirror = 1;
7478                         /* Count VID with push_vlan command. */
7479                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7480                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7481                         break;
7482                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7483                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7484                         ret = flow_dv_validate_action_l2_encap(dev,
7485                                                                action_flags,
7486                                                                actions, attr,
7487                                                                error);
7488                         if (ret < 0)
7489                                 return ret;
7490                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7491                         ++actions_n;
7492                         break;
7493                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7494                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7495                         ret = flow_dv_validate_action_decap(dev, action_flags,
7496                                                             actions, item_flags,
7497                                                             attr, error);
7498                         if (ret < 0)
7499                                 return ret;
7500                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7501                                 modify_after_mirror = 1;
7502                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7503                         ++actions_n;
7504                         break;
7505                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7506                         ret = flow_dv_validate_action_raw_encap_decap
7507                                 (dev, NULL, actions->conf, attr, &action_flags,
7508                                  &actions_n, actions, item_flags, error);
7509                         if (ret < 0)
7510                                 return ret;
7511                         break;
7512                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7513                         decap = actions->conf;
7514                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7515                                 ;
7516                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7517                                 encap = NULL;
7518                                 actions--;
7519                         } else {
7520                                 encap = actions->conf;
7521                         }
7522                         ret = flow_dv_validate_action_raw_encap_decap
7523                                            (dev,
7524                                             decap ? decap : &empty_decap, encap,
7525                                             attr, &action_flags, &actions_n,
7526                                             actions, item_flags, error);
7527                         if (ret < 0)
7528                                 return ret;
7529                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7530                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7531                                 modify_after_mirror = 1;
7532                         break;
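                        /*
                         * Note (illustration): a RAW_DECAP immediately
                         * followed by RAW_ENCAP, with any VOID actions in
                         * between, is validated as one L3 decap/encap pair.
                         * The lookahead above skips the VOIDs and rewinds
                         * the cursor when no RAW_ENCAP follows, e.g.:
                         *
                         *   RAW_DECAP / VOID / RAW_ENCAP -> combined pair
                         *   RAW_DECAP / QUEUE            -> standalone decap
                         */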
7533                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7534                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7535                         ret = flow_dv_validate_action_modify_mac(action_flags,
7536                                                                  actions,
7537                                                                  item_flags,
7538                                                                  error);
7539                         if (ret < 0)
7540                                 return ret;
7541                         /* Count all modify-header actions as one action. */
7542                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7543                                 ++actions_n;
7544                         action_flags |= actions->type ==
7545                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7546                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7547                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7548                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7549                                 modify_after_mirror = 1;
7550                         /*
7551                          * Even though the source and destination MAC
7552                          * addresses overlap within the 4B-aligned header,
7553                          * the convert function handles them separately and
7554                          * creates 4 SW actions in total: 2 actions are added
7555                          * per address, no matter how many address bytes are set.
7556                          */
7557                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7558                         break;
7559                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7560                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7561                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7562                                                                   actions,
7563                                                                   item_flags,
7564                                                                   error);
7565                         if (ret < 0)
7566                                 return ret;
7567                         /* Count all modify-header actions as one action. */
7568                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7569                                 ++actions_n;
7570                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7571                                 modify_after_mirror = 1;
7572                         action_flags |= actions->type ==
7573                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7574                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7575                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7576                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7577                         break;
7578                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7579                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7580                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7581                                                                   actions,
7582                                                                   item_flags,
7583                                                                   error);
7584                         if (ret < 0)
7585                                 return ret;
7586                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7587                                 return rte_flow_error_set(error, ENOTSUP,
7588                                         RTE_FLOW_ERROR_TYPE_ACTION,
7589                                         actions,
7590                                         "Can't change header "
7591                                         "with ICMPv6 proto");
7592                         /* Count all modify-header actions as one action. */
7593                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7594                                 ++actions_n;
7595                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7596                                 modify_after_mirror = 1;
7597                         action_flags |= actions->type ==
7598                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7599                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7600                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7601                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7602                         break;
7603                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7604                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7605                         ret = flow_dv_validate_action_modify_tp(action_flags,
7606                                                                 actions,
7607                                                                 item_flags,
7608                                                                 error);
7609                         if (ret < 0)
7610                                 return ret;
7611                         /* Count all modify-header actions as one action. */
7612                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7613                                 ++actions_n;
7614                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7615                                 modify_after_mirror = 1;
7616                         action_flags |= actions->type ==
7617                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7618                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7619                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7620                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7621                         break;
7622                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7623                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7624                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7625                                                                  actions,
7626                                                                  item_flags,
7627                                                                  error);
7628                         if (ret < 0)
7629                                 return ret;
7630                         /* Count all modify-header actions as one action. */
7631                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7632                                 ++actions_n;
7633                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7634                                 modify_after_mirror = 1;
7635                         action_flags |= actions->type ==
7636                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7637                                                 MLX5_FLOW_ACTION_SET_TTL :
7638                                                 MLX5_FLOW_ACTION_DEC_TTL;
7639                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7640                         break;
7641                 case RTE_FLOW_ACTION_TYPE_JUMP:
7642                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7643                                                            action_flags,
7644                                                            attr, external,
7645                                                            error);
7646                         if (ret)
7647                                 return ret;
7648                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7649                             fdb_mirror_limit)
7650                                 return rte_flow_error_set(error, EINVAL,
7651                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7652                                                   NULL,
7653                                                   "sample and jump action combination is not supported");
7654                         ++actions_n;
7655                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7656                         break;
7657                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7658                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7659                         ret = flow_dv_validate_action_modify_tcp_seq
7660                                                                 (action_flags,
7661                                                                  actions,
7662                                                                  item_flags,
7663                                                                  error);
7664                         if (ret < 0)
7665                                 return ret;
7666                         /* Count all modify-header actions as one action. */
7667                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7668                                 ++actions_n;
7669                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7670                                 modify_after_mirror = 1;
7671                         action_flags |= actions->type ==
7672                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7673                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7674                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7675                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7676                         break;
7677                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7678                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7679                         ret = flow_dv_validate_action_modify_tcp_ack
7680                                                                 (action_flags,
7681                                                                  actions,
7682                                                                  item_flags,
7683                                                                  error);
7684                         if (ret < 0)
7685                                 return ret;
7686                         /* Count all modify-header actions as one action. */
7687                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7688                                 ++actions_n;
7689                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7690                                 modify_after_mirror = 1;
7691                         action_flags |= actions->type ==
7692                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7693                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7694                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7695                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7696                         break;
7697                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7698                         break;
7699                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7700                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7701                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7702                         break;
7703                 case RTE_FLOW_ACTION_TYPE_METER:
7704                         ret = mlx5_flow_validate_action_meter(dev,
7705                                                               action_flags,
7706                                                               item_flags,
7707                                                               actions, attr,
7708                                                               port_id_item,
7709                                                               &def_policy,
7710                                                               error);
7711                         if (ret < 0)
7712                                 return ret;
7713                         action_flags |= MLX5_FLOW_ACTION_METER;
7714                         if (!def_policy)
7715                                 action_flags |=
7716                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7717                         ++actions_n;
7718                         /* Meter action will add one more TAG action. */
7719                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7720                         break;
7721                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7722                         if (!attr->transfer && !attr->group)
7723                                 return rte_flow_error_set(error, ENOTSUP,
7724                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7725                                                 NULL, "Shared ASO age action is "
7726                                                 "not supported for group 0");
7727                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7728                                 return rte_flow_error_set
7729                                                   (error, EINVAL,
7730                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7731                                                    NULL,
7732                                                    "duplicate age actions set");
7733                         action_flags |= MLX5_FLOW_ACTION_AGE;
7734                         ++actions_n;
7735                         break;
7736                 case RTE_FLOW_ACTION_TYPE_AGE:
7737                         ret = flow_dv_validate_action_age(action_flags,
7738                                                           actions, dev,
7739                                                           error);
7740                         if (ret < 0)
7741                                 return ret;
7742                         /*
7743                          * Validate that the regular AGE action (using a counter)
7744                          * is mutually exclusive with shared counter actions.
7745                          */
7746                         if (!priv->sh->flow_hit_aso_en) {
7747                                 if (shared_count)
7748                                         return rte_flow_error_set
7749                                                 (error, EINVAL,
7750                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7751                                                 NULL,
7752                                                 "old age and shared count combination is not supported");
7753                                 if (sample_count)
7754                                         return rte_flow_error_set
7755                                                 (error, EINVAL,
7756                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7757                                                 NULL,
7758                                                 "old age action and count must be in the same sub flow");
7759                         }
7760                         action_flags |= MLX5_FLOW_ACTION_AGE;
7761                         ++actions_n;
7762                         break;
7763                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7764                         ret = flow_dv_validate_action_modify_ipv4_dscp
7765                                                          (action_flags,
7766                                                           actions,
7767                                                           item_flags,
7768                                                           error);
7769                         if (ret < 0)
7770                                 return ret;
7771                         /* Count all modify-header actions as one action. */
7772                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7773                                 ++actions_n;
7774                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7775                                 modify_after_mirror = 1;
7776                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7777                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7778                         break;
7779                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7780                         ret = flow_dv_validate_action_modify_ipv6_dscp
7781                                                                 (action_flags,
7782                                                                  actions,
7783                                                                  item_flags,
7784                                                                  error);
7785                         if (ret < 0)
7786                                 return ret;
7787                         /* Count all modify-header actions as one action. */
7788                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7789                                 ++actions_n;
7790                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7791                                 modify_after_mirror = 1;
7792                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7793                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7794                         break;
7795                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7796                         ret = flow_dv_validate_action_sample(&action_flags,
7797                                                              actions, dev,
7798                                                              attr, item_flags,
7799                                                              rss, &sample_rss,
7800                                                              &sample_count,
7801                                                              &fdb_mirror_limit,
7802                                                              error);
7803                         if (ret < 0)
7804                                 return ret;
7805                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7806                         ++actions_n;
7807                         break;
7808                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7809                         ret = flow_dv_validate_action_modify_field(dev,
7810                                                                    action_flags,
7811                                                                    actions,
7812                                                                    attr,
7813                                                                    error);
7814                         if (ret < 0)
7815                                 return ret;
7816                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7817                                 modify_after_mirror = 1;
7818                         /* Count all modify-header actions as one action. */
7819                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7820                                 ++actions_n;
7821                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7822                         rw_act_num += ret;
7823                         break;
7824                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7825                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7826                                                              item_flags, attr,
7827                                                              error);
7828                         if (ret < 0)
7829                                 return ret;
7830                         action_flags |= MLX5_FLOW_ACTION_CT;
7831                         break;
7832                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7833                         /* Tunnel offload action was processed before;
7834                          * list it here as a supported type.
7835                          */
7836                         break;
7837                 default:
7838                         return rte_flow_error_set(error, ENOTSUP,
7839                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7840                                                   actions,
7841                                                   "action not supported");
7842                 }
7843         }
7844         /*
7845          * Validate actions in flow rules
7846          * - Explicit decap action is prohibited by the tunnel offload API.
7847          * - Drop action in tunnel steer rule is prohibited by the API.
7848          * - Application cannot use MARK action because its value can mask
7849          *   tunnel default miss notification.
7850          * - JUMP in tunnel match rule is not supported by the current PMD
7851          *   implementation.
7852          * - TAG & META are reserved for future uses.
7853          */
7854         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7855                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7856                                             MLX5_FLOW_ACTION_MARK     |
7857                                             MLX5_FLOW_ACTION_SET_TAG  |
7858                                             MLX5_FLOW_ACTION_SET_META |
7859                                             MLX5_FLOW_ACTION_DROP;
7860
7861                 if (action_flags & bad_actions_mask)
7862                         return rte_flow_error_set
7863                                         (error, EINVAL,
7864                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7865                                         "Invalid RTE action in tunnel "
7866                                         "set decap rule");
7867                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7868                         return rte_flow_error_set
7869                                         (error, EINVAL,
7870                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7871                                         "tunnel set decap rule must terminate "
7872                                         "with JUMP");
7873                 if (!attr->ingress)
7874                         return rte_flow_error_set
7875                                         (error, EINVAL,
7876                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7877                                         "tunnel flows for ingress traffic only");
7878         }
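        /*
         * Example under the checks above (illustrative only): a tunnel
         * "set" (steer) rule may carry COUNT and must terminate with JUMP
         * on ingress, while adding DECAP, DROP, MARK, SET_TAG or SET_META
         * to the same rule is rejected with EINVAL.
         */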
7879         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7880                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7881                                             MLX5_FLOW_ACTION_MARK    |
7882                                             MLX5_FLOW_ACTION_SET_TAG |
7883                                             MLX5_FLOW_ACTION_SET_META;
7884
7885                 if (action_flags & bad_actions_mask)
7886                         return rte_flow_error_set
7887                                         (error, EINVAL,
7888                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7889                                         "Invalid RTE action in tunnel "
7890                                         "set match rule");
7891         }
7892         /*
7893          * Validate the drop action mutual exclusion with other actions.
7894          * Drop action is mutually-exclusive with any other action, except for
7895          * Count action.
7896          * Drop action compatibility with tunnel offload was already validated.
7897          */
7898         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7899                             MLX5_FLOW_ACTION_TUNNEL_SET));
7900         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7901             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7902                 return rte_flow_error_set(error, EINVAL,
7903                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7904                                           "Drop action is mutually-exclusive "
7905                                           "with any other action, except for "
7906                                           "Count action");
7907         /* E-Switch has a few restrictions on using items and actions. */
7908         if (attr->transfer) {
7909                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7910                     action_flags & MLX5_FLOW_ACTION_FLAG)
7911                         return rte_flow_error_set(error, ENOTSUP,
7912                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7913                                                   NULL,
7914                                                   "unsupported action FLAG");
7915                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7916                     action_flags & MLX5_FLOW_ACTION_MARK)
7917                         return rte_flow_error_set(error, ENOTSUP,
7918                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7919                                                   NULL,
7920                                                   "unsupported action MARK");
7921                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7922                         return rte_flow_error_set(error, ENOTSUP,
7923                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7924                                                   NULL,
7925                                                   "unsupported action QUEUE");
7926                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7927                         return rte_flow_error_set(error, ENOTSUP,
7928                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7929                                                   NULL,
7930                                                   "unsupported action RSS");
7931                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7932                         return rte_flow_error_set(error, EINVAL,
7933                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7934                                                   actions,
7935                                                   "no fate action is found");
7936         } else {
7937                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7938                         return rte_flow_error_set(error, EINVAL,
7939                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7940                                                   actions,
7941                                                   "no fate action is found");
7942         }
7943         /*
7944          * Continue validation for Xcap and VLAN actions.
7945          * If hairpin works in explicit TX rule mode, there is no action
7946          * splitting, and the validation of a hairpin ingress flow is the
7947          * same as for other standard flows.
7948          */
7949         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7950                              MLX5_FLOW_VLAN_ACTIONS)) &&
7951             (queue_index == 0xFFFF ||
7952              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7953              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7954              conf->tx_explicit != 0))) {
7955                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7956                     MLX5_FLOW_XCAP_ACTIONS)
7957                         return rte_flow_error_set(error, ENOTSUP,
7958                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7959                                                   NULL, "encap and decap "
7960                                                   "combination is not supported");
7961                 if (!attr->transfer && attr->ingress) {
7962                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7963                                 return rte_flow_error_set
7964                                                 (error, ENOTSUP,
7965                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7966                                                  NULL, "encap is not supported"
7967                                                  " for ingress traffic");
7968                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7969                                 return rte_flow_error_set
7970                                                 (error, ENOTSUP,
7971                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7972                                                  NULL, "push VLAN action not "
7973                                                  "supported for ingress");
7974                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7975                                         MLX5_FLOW_VLAN_ACTIONS)
7976                                 return rte_flow_error_set
7977                                                 (error, ENOTSUP,
7978                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7979                                                  NULL, "no support for "
7980                                                  "multiple VLAN actions");
7981                 }
7982         }
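        /*
         * Sketch of the skip condition above: when queue_index refers to a
         * hairpin Rx queue configured with tx_explicit == 0, the encap/decap
         * and VLAN checks are bypassed here because the action split has
         * already moved those actions to the implicit Tx flow.
         */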
7983         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7984                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7985                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7986                         attr->ingress)
7987                         return rte_flow_error_set
7988                                 (error, ENOTSUP,
7989                                 RTE_FLOW_ERROR_TYPE_ACTION,
7990                                 NULL, "fate action not supported for "
7991                                 "meter with policy");
7992                 if (attr->egress) {
7993                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7994                                 return rte_flow_error_set
7995                                         (error, ENOTSUP,
7996                                         RTE_FLOW_ERROR_TYPE_ACTION,
7997                                         NULL, "modify header action in egress "
7998                                         "cannot be done before meter action");
7999                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
8000                                 return rte_flow_error_set
8001                                         (error, ENOTSUP,
8002                                         RTE_FLOW_ERROR_TYPE_ACTION,
8003                                         NULL, "encap action in egress "
8004                                         "cannot be done before meter action");
8005                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
8006                                 return rte_flow_error_set
8007                                         (error, ENOTSUP,
8008                                         RTE_FLOW_ERROR_TYPE_ACTION,
8009                                         NULL, "push vlan action in egress "
8010                                         "cannot be done before meter action");
8011                 }
8012         }
8013         /*
8014          * Hairpin flow will add one more TAG action in TX implicit mode.
8015          * In TX explicit mode, there will be no hairpin flow ID.
8016          */
8017         if (hairpin > 0)
8018                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8019         /* Extra metadata enabled: one more TAG action will be added. */
8020         if (dev_conf->dv_flow_en &&
8021             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
8022             mlx5_flow_ext_mreg_supported(dev))
8023                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8024         if (rw_act_num > flow_dv_modify_hdr_action_max(dev, is_root)) {
8026                 return rte_flow_error_set(error, ENOTSUP,
8027                                           RTE_FLOW_ERROR_TYPE_ACTION,
8028                                           NULL, "too many header modify"
8029                                           " actions to support");
8030         }
8031         /* E-Switch egress mirror combined with modify has a limitation on CX5. */
8032         if (fdb_mirror_limit && modify_after_mirror)
8033                 return rte_flow_error_set(error, EINVAL,
8034                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8035                                 "sample before modify action is not supported");
8036         return 0;
8037 }
8038
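/*
 * Note on the counting scheme in flow_dv_validate() above (illustration):
 * all modify-header actions are compiled into one device action, so
 * actions_n is incremented only once for the whole group, while rw_act_num
 * accumulates the individual register writes. For example, a rule with
 * SET_IPV4_SRC + SET_TP_SRC + DEC_TTL contributes:
 *
 *      actions_n  += 1;  (first modify-header action only)
 *      rw_act_num += MLX5_ACT_NUM_MDF_IPV4 + MLX5_ACT_NUM_MDF_PORT +
 *                    MLX5_ACT_NUM_MDF_TTL;
 *
 * and the total is checked against flow_dv_modify_hdr_action_max().
 */
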
8039 /**
8040  * Internal preparation function. Allocates the DV flow structure;
8041  * its size is constant.
8042  *
8043  * @param[in] dev
8044  *   Pointer to the rte_eth_dev structure.
8045  * @param[in] attr
8046  *   Pointer to the flow attributes.
8047  * @param[in] items
8048  *   Pointer to the list of items.
8049  * @param[in] actions
8050  *   Pointer to the list of actions.
8051  * @param[out] error
8052  *   Pointer to the error structure.
8053  *
8054  * @return
8055  *   Pointer to mlx5_flow object on success,
8056  *   otherwise NULL and rte_errno is set.
8057  */
8058 static struct mlx5_flow *
8059 flow_dv_prepare(struct rte_eth_dev *dev,
8060                 const struct rte_flow_attr *attr,
8061                 const struct rte_flow_item items[] __rte_unused,
8062                 const struct rte_flow_action actions[] __rte_unused,
8063                 struct rte_flow_error *error)
8064 {
8065         uint32_t handle_idx = 0;
8066         struct mlx5_flow *dev_flow;
8067         struct mlx5_flow_handle *dev_handle;
8068         struct mlx5_priv *priv = dev->data->dev_private;
8069         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8070
8071         MLX5_ASSERT(wks);
8072         wks->skip_matcher_reg = 0;
8073         wks->policy = NULL;
8074         wks->final_policy = NULL;
8075         /* In case of corrupting the memory. */
8076         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
8077                 rte_flow_error_set(error, ENOSPC,
8078                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8079                                    "no free temporary device flow slot");
8080                 return NULL;
8081         }
8082         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
8083                                    &handle_idx);
8084         if (!dev_handle) {
8085                 rte_flow_error_set(error, ENOMEM,
8086                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8087                                    "not enough memory to create flow handle");
8088                 return NULL;
8089         }
8090         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
8091         dev_flow = &wks->flows[wks->flow_idx++];
8092         memset(dev_flow, 0, sizeof(*dev_flow));
8093         dev_flow->handle = dev_handle;
8094         dev_flow->handle_idx = handle_idx;
8095         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
8096         dev_flow->ingress = attr->ingress;
8097         dev_flow->dv.transfer = attr->transfer;
8098         return dev_flow;
8099 }
8100
8101 #ifdef RTE_LIBRTE_MLX5_DEBUG
8102 /**
8103  * Sanity check for match mask and value. Similar to check_valid_spec() in
8104  * the kernel driver. If an unmasked bit is present in the value, it returns failure.
8105  *
8106  * @param match_mask
8107  *   pointer to match mask buffer.
8108  * @param match_value
8109  *   pointer to match value buffer.
8110  *
8111  * @return
8112  *   0 if valid, -EINVAL otherwise.
8113  */
8114 static int
8115 flow_dv_check_valid_spec(void *match_mask, void *match_value)
8116 {
8117         uint8_t *m = match_mask;
8118         uint8_t *v = match_value;
8119         unsigned int i;
8120
8121         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
8122                 if (v[i] & ~m[i]) {
8123                         DRV_LOG(ERR,
8124                                 "match_value differs from match_criteria"
8125                                 " %p[%u] != %p[%u]",
8126                                 match_value, i, match_mask, i);
8127                         return -EINVAL;
8128                 }
8129         }
8130         return 0;
8131 }
8132 #endif
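
     /*
      * Editor's illustration (a sketch, not part of the driver): the check
      * above enforces that every value bit is covered by a mask bit, i.e.
      * (v[i] & ~m[i]) == 0 for each byte of the match parameters:
      *
      *   m = 0xff, v = 0x1a  ->  v & ~m == 0x00, accepted
      *   m = 0x0f, v = 0x1a  ->  v & ~m == 0x10, rejected with -EINVAL
      */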
8133
8134 /**
8135  * Add match of ip_version.
8136  *
8137  * @param[in] group
8138  *   Flow group.
8139  * @param[in] headers_v
8140  *   Values header pointer.
8141  * @param[in] headers_m
8142  *   Masks header pointer.
8143  * @param[in] ip_version
8144  *   The IP version to set.
8145  */
8146 static inline void
8147 flow_dv_set_match_ip_version(uint32_t group,
8148                              void *headers_v,
8149                              void *headers_m,
8150                              uint8_t ip_version)
8151 {
8152         if (group == 0)
8153                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8154         else
8155                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8156                          ip_version);
8157         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8158         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8159         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8160 }
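
     /*
      * Editor's note (illustrative, not upstream code): on the root table
      * (group 0) ip_version is matched with the full 0xf mask, while on
      * non-root tables the version value itself is reused as the mask:
      *
      *   group 0, IPv4  ->  mask 0xf, value 0x4
      *   group 1, IPv6  ->  mask 0x6, value 0x6
      *
      * In both cases the ethertype match is cleared, since ip_version
      * supersedes it.
      */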
8161
8162 /**
8163  * Add Ethernet item to matcher and to the value.
8164  *
8165  * @param[in, out] matcher
8166  *   Flow matcher.
8167  * @param[in, out] key
8168  *   Flow matcher value.
8169  * @param[in] item
8170  *   Flow pattern to translate.
8171  * @param[in] inner
8172  *   Item is inner pattern.
      * @param[in] group
      *   The group to insert the rule.
8173  */
8174 static void
8175 flow_dv_translate_item_eth(void *matcher, void *key,
8176                            const struct rte_flow_item *item, int inner,
8177                            uint32_t group)
8178 {
8179         const struct rte_flow_item_eth *eth_m = item->mask;
8180         const struct rte_flow_item_eth *eth_v = item->spec;
8181         const struct rte_flow_item_eth nic_mask = {
8182                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8183                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8184                 .type = RTE_BE16(0xffff),
8185                 .has_vlan = 0,
8186         };
8187         void *hdrs_m;
8188         void *hdrs_v;
8189         char *l24_v;
8190         unsigned int i;
8191
8192         if (!eth_v)
8193                 return;
8194         if (!eth_m)
8195                 eth_m = &nic_mask;
8196         if (inner) {
8197                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8198                                          inner_headers);
8199                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8200         } else {
8201                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8202                                          outer_headers);
8203                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8204         }
8205         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8206                &eth_m->dst, sizeof(eth_m->dst));
8207         /* The value must be in the range of the mask. */
8208         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8209         for (i = 0; i < sizeof(eth_m->dst); ++i)
8210                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8211         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8212                &eth_m->src, sizeof(eth_m->src));
8213         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8214         /* The value must be in the range of the mask. */
8215         for (i = 0; i < sizeof(eth_m->src); ++i)
8216                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8217         /*
8218          * HW supports match on one Ethertype, the Ethertype following the last
8219          * VLAN tag of the packet (see PRM).
8220          * Set match on ethertype only if ETH header is not followed by VLAN.
8221          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8222          * ethertype, and use ip_version field instead.
8223          * eCPRI over Ether layer will use type value 0xAEFE.
8224          */
8225         if (eth_m->type == 0xFFFF) {
8226                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
8227                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8228                 switch (eth_v->type) {
8229                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8230                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8231                         return;
8232                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8233                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8234                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8235                         return;
8236                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8237                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8238                         return;
8239                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8240                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8241                         return;
8242                 default:
8243                         break;
8244                 }
8245         }
8246         if (eth_m->has_vlan) {
8247                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8248                 if (eth_v->has_vlan) {
8249                         /*
8250                          * Here, if the has_more_vlan field in the VLAN item is
8251                          * also not set, only single-tagged packets will be matched.
8252                          */
8253                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8254                         return;
8255                 }
8256         }
8257         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8258                  rte_be_to_cpu_16(eth_m->type));
8259         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8260         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8261 }
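
     /*
      * Editor's illustration of the ethertype handling above (assumed
      * example values). With a fully masked type (0xffff):
      *
      *   type 0x8100 (VLAN)  ->  match cvlan_tag instead of ethertype
      *   type 0x0800 (IPv4)  ->  match ip_version == 4
      *   type 0xaefe (eCPRI) ->  falls through, matched as raw ethertype
      *
      * A partially masked type is matched as mask & value on the
      * ethertype field itself.
      */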
8262
8263 /**
8264  * Add VLAN item to matcher and to the value.
8265  *
8266  * @param[in, out] dev_flow
8267  *   Flow descriptor.
8268  * @param[in, out] matcher
8269  *   Flow matcher.
8270  * @param[in, out] key
8271  *   Flow matcher value.
8272  * @param[in] item
8273  *   Flow pattern to translate.
8274  * @param[in] inner
8275  *   Item is inner pattern.
      * @param[in] group
      *   The group to insert the rule.
8276  */
8277 static void
8278 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8279                             void *matcher, void *key,
8280                             const struct rte_flow_item *item,
8281                             int inner, uint32_t group)
8282 {
8283         const struct rte_flow_item_vlan *vlan_m = item->mask;
8284         const struct rte_flow_item_vlan *vlan_v = item->spec;
8285         void *hdrs_m;
8286         void *hdrs_v;
8287         uint16_t tci_m;
8288         uint16_t tci_v;
8289
8290         if (inner) {
8291                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8292                                          inner_headers);
8293                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8294         } else {
8295                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8296                                          outer_headers);
8297                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8298                 /*
8299                  * This is a workaround. Masks are not supported here
8300                  * and have been pre-validated.
8301                  */
8302                 if (vlan_v)
8303                         dev_flow->handle->vf_vlan.tag =
8304                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8305         }
8306         /*
8307          * When VLAN item exists in flow, mark packet as tagged,
8308          * even if TCI is not specified.
8309          */
8310         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8311                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8312                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8313         }
8314         if (!vlan_v)
8315                 return;
8316         if (!vlan_m)
8317                 vlan_m = &rte_flow_item_vlan_mask;
8318         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8319         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8320         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8321         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8322         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8323         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8324         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8325         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8326         /*
8327          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8328          * ethertype, and use ip_version field instead.
8329          */
8330         if (vlan_m->inner_type == 0xFFFF) {
8331                 switch (vlan_v->inner_type) {
8332                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8333                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8334                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8335                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8336                         return;
8337                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8338                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8339                         return;
8340                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8341                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8342                         return;
8343                 default:
8344                         break;
8345                 }
8346         }
8347         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8348                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8349                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8350                 /* Only one vlan_tag bit can be set. */
8351                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8352                 return;
8353         }
8354         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8355                  rte_be_to_cpu_16(vlan_m->inner_type));
8356         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8357                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8358 }
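
     /*
      * Editor's worked example for the TCI split above (assumed values):
      * with TCI value 0xe00a and mask 0xffff,
      *
      *   first_prio = 0xe00a >> 13         = 7      (PCP, bits 15-13)
      *   first_cfi  = (0xe00a >> 12) & 0x1 = 0      (DEI/CFI, bit 12)
      *   first_vid  = 0xe00a & 0xfff       = 0x00a  (VID, bits 11-0)
      *
      * MLX5_SET() truncates each value to its field width, so the explicit
      * masking shown here happens implicitly.
      */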
8359
8360 /**
8361  * Add IPV4 item to matcher and to the value.
8362  *
8363  * @param[in, out] matcher
8364  *   Flow matcher.
8365  * @param[in, out] key
8366  *   Flow matcher value.
8367  * @param[in] item
8368  *   Flow pattern to translate.
8369  * @param[in] inner
8370  *   Item is inner pattern.
8371  * @param[in] group
8372  *   The group to insert the rule.
8373  */
8374 static void
8375 flow_dv_translate_item_ipv4(void *matcher, void *key,
8376                             const struct rte_flow_item *item,
8377                             int inner, uint32_t group)
8378 {
8379         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8380         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8381         const struct rte_flow_item_ipv4 nic_mask = {
8382                 .hdr = {
8383                         .src_addr = RTE_BE32(0xffffffff),
8384                         .dst_addr = RTE_BE32(0xffffffff),
8385                         .type_of_service = 0xff,
8386                         .next_proto_id = 0xff,
8387                         .time_to_live = 0xff,
8388                 },
8389         };
8390         void *headers_m;
8391         void *headers_v;
8392         char *l24_m;
8393         char *l24_v;
8394         uint8_t tos, ihl_m, ihl_v;
8395
8396         if (inner) {
8397                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8398                                          inner_headers);
8399                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8400         } else {
8401                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8402                                          outer_headers);
8403                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8404         }
8405         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8406         if (!ipv4_v)
8407                 return;
8408         if (!ipv4_m)
8409                 ipv4_m = &nic_mask;
8410         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8411                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8412         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8413                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8414         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8415         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8416         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8417                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8418         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8419                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8420         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8421         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8422         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8423         ihl_m = ipv4_m->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8424         ihl_v = ipv4_v->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8425         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_ihl, ihl_m);
8426         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, ihl_m & ihl_v);
8427         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8428                  ipv4_m->hdr.type_of_service);
8429         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8430         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8431                  ipv4_m->hdr.type_of_service >> 2);
8432         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8433         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8434                  ipv4_m->hdr.next_proto_id);
8435         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8436                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8437         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8438                  ipv4_m->hdr.time_to_live);
8439         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8440                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8441         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8442                  !!(ipv4_m->hdr.fragment_offset));
8443         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8444                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8445 }
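
     /*
      * Editor's illustration (assumed values): the "value within mask"
      * rule above stores mask & value, e.g. dst 10.1.1.5/255.255.255.0 is
      * written as value 10.1.1.0. The TOS byte is split in two:
      *
      *   tos = 0x2e  ->  ip_dscp = 0x2e >> 2 = 0x0b
      *                   ip_ecn  = 0x2e & 0x3 = 0x2
      *
      * where the truncation to the 2-bit ip_ecn field is done by
      * MLX5_SET() itself.
      */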
8446
8447 /**
8448  * Add IPV6 item to matcher and to the value.
8449  *
8450  * @param[in, out] matcher
8451  *   Flow matcher.
8452  * @param[in, out] key
8453  *   Flow matcher value.
8454  * @param[in] item
8455  *   Flow pattern to translate.
8456  * @param[in] inner
8457  *   Item is inner pattern.
8458  * @param[in] group
8459  *   The group to insert the rule.
8460  */
8461 static void
8462 flow_dv_translate_item_ipv6(void *matcher, void *key,
8463                             const struct rte_flow_item *item,
8464                             int inner, uint32_t group)
8465 {
8466         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8467         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8468         const struct rte_flow_item_ipv6 nic_mask = {
8469                 .hdr = {
8470                         .src_addr =
8471                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8472                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8473                         .dst_addr =
8474                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8475                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8476                         .vtc_flow = RTE_BE32(0xffffffff),
8477                         .proto = 0xff,
8478                         .hop_limits = 0xff,
8479                 },
8480         };
8481         void *headers_m;
8482         void *headers_v;
8483         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8484         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8485         char *l24_m;
8486         char *l24_v;
8487         uint32_t vtc_m;
8488         uint32_t vtc_v;
8489         int i;
8490         int size;
8491
8492         if (inner) {
8493                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8494                                          inner_headers);
8495                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8496         } else {
8497                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8498                                          outer_headers);
8499                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8500         }
8501         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8502         if (!ipv6_v)
8503                 return;
8504         if (!ipv6_m)
8505                 ipv6_m = &nic_mask;
8506         size = sizeof(ipv6_m->hdr.dst_addr);
8507         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8508                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8509         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8510                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8511         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8512         for (i = 0; i < size; ++i)
8513                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8514         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8515                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8516         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8517                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8518         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8519         for (i = 0; i < size; ++i)
8520                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8521         /* Traffic class (TOS). */
8522         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8523         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8524         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8525         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8526         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8527         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8528         /* Label. */
8529         if (inner) {
8530                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8531                          vtc_m);
8532                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8533                          vtc_v);
8534         } else {
8535                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8536                          vtc_m);
8537                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8538                          vtc_v);
8539         }
8540         /* Protocol. */
8541         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8542                  ipv6_m->hdr.proto);
8543         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8544                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8545         /* Hop limit. */
8546         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8547                  ipv6_m->hdr.hop_limits);
8548         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8549                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8550         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8551                  !!(ipv6_m->has_frag_ext));
8552         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8553                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8554 }
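
     /*
      * Editor's worked example for the vtc_flow split above (assumed
      * values). vtc_flow packs version(4) | tc(8) | flow label(20), so
      * for vtc_flow = 0x62eabcde (version 6, TC 0x2e, label 0xabcde):
      *
      *   ip_dscp    = 0x62eabcde >> 22, truncated to 6 bits   = 0x0b
      *   ip_ecn     = 0x62eabcde >> 20, truncated to 2 bits   = 0x2
      *   flow_label = 0x62eabcde, truncated to 20 bits        = 0xabcde
      */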
8555
8556 /**
8557  * Add IPV6 fragment extension item to matcher and to the value.
8558  *
8559  * @param[in, out] matcher
8560  *   Flow matcher.
8561  * @param[in, out] key
8562  *   Flow matcher value.
8563  * @param[in] item
8564  *   Flow pattern to translate.
8565  * @param[in] inner
8566  *   Item is inner pattern.
8567  */
8568 static void
8569 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8570                                      const struct rte_flow_item *item,
8571                                      int inner)
8572 {
8573         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8574         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8575         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8576                 .hdr = {
8577                         .next_header = 0xff,
8578                         .frag_data = RTE_BE16(0xffff),
8579                 },
8580         };
8581         void *headers_m;
8582         void *headers_v;
8583
8584         if (inner) {
8585                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8586                                          inner_headers);
8587                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8588         } else {
8589                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8590                                          outer_headers);
8591                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8592         }
8593         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8594         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8595         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8596         if (!ipv6_frag_ext_v)
8597                 return;
8598         if (!ipv6_frag_ext_m)
8599                 ipv6_frag_ext_m = &nic_mask;
8600         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8601                  ipv6_frag_ext_m->hdr.next_header);
8602         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8603                  ipv6_frag_ext_v->hdr.next_header &
8604                  ipv6_frag_ext_m->hdr.next_header);
8605 }
8606
8607 /**
8608  * Add TCP item to matcher and to the value.
8609  *
8610  * @param[in, out] matcher
8611  *   Flow matcher.
8612  * @param[in, out] key
8613  *   Flow matcher value.
8614  * @param[in] item
8615  *   Flow pattern to translate.
8616  * @param[in] inner
8617  *   Item is inner pattern.
8618  */
8619 static void
8620 flow_dv_translate_item_tcp(void *matcher, void *key,
8621                            const struct rte_flow_item *item,
8622                            int inner)
8623 {
8624         const struct rte_flow_item_tcp *tcp_m = item->mask;
8625         const struct rte_flow_item_tcp *tcp_v = item->spec;
8626         void *headers_m;
8627         void *headers_v;
8628
8629         if (inner) {
8630                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8631                                          inner_headers);
8632                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8633         } else {
8634                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8635                                          outer_headers);
8636                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8637         }
8638         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8639         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8640         if (!tcp_v)
8641                 return;
8642         if (!tcp_m)
8643                 tcp_m = &rte_flow_item_tcp_mask;
8644         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8645                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8646         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8647                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8648         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8649                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8650         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8651                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8652         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8653                  tcp_m->hdr.tcp_flags);
8654         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8655                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8656 }
8657
8658 /**
8659  * Add UDP item to matcher and to the value.
8660  *
8661  * @param[in, out] matcher
8662  *   Flow matcher.
8663  * @param[in, out] key
8664  *   Flow matcher value.
8665  * @param[in] item
8666  *   Flow pattern to translate.
8667  * @param[in] inner
8668  *   Item is inner pattern.
8669  */
8670 static void
8671 flow_dv_translate_item_udp(void *matcher, void *key,
8672                            const struct rte_flow_item *item,
8673                            int inner)
8674 {
8675         const struct rte_flow_item_udp *udp_m = item->mask;
8676         const struct rte_flow_item_udp *udp_v = item->spec;
8677         void *headers_m;
8678         void *headers_v;
8679
8680         if (inner) {
8681                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8682                                          inner_headers);
8683                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8684         } else {
8685                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8686                                          outer_headers);
8687                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8688         }
8689         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8690         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8691         if (!udp_v)
8692                 return;
8693         if (!udp_m)
8694                 udp_m = &rte_flow_item_udp_mask;
8695         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8696                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8697         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8698                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8699         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8700                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8701         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8702                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8703 }
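
     /*
      * Editor's note (illustrative): both L4 translators above follow the
      * same pattern: ip_protocol is forced with a full 0xff mask (TCP or
      * UDP), then each port is written as mask & value, e.g.
      *
      *   dst_port 4789, mask 0xffff  ->  udp_dport mask 0xffff, value 4789
      *   dst_port 4789, mask 0x0000  ->  udp_dport not matched at all
      */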
8704
8705 /**
8706  * Add GRE optional Key item to matcher and to the value.
8707  *
8708  * @param[in, out] matcher
8709  *   Flow matcher.
8710  * @param[in, out] key
8711  *   Flow matcher value.
8712  * @param[in] item
8713  *   Flow pattern to translate.
8716  */
8717 static void
8718 flow_dv_translate_item_gre_key(void *matcher, void *key,
8719                                    const struct rte_flow_item *item)
8720 {
8721         const rte_be32_t *key_m = item->mask;
8722         const rte_be32_t *key_v = item->spec;
8723         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8724         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8725         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8726
8727         /* GRE K bit must be on and should already have been validated. */
8728         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8729         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8730         if (!key_v)
8731                 return;
8732         if (!key_m)
8733                 key_m = &gre_key_default_mask;
8734         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8735                  rte_be_to_cpu_32(*key_m) >> 8);
8736         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8737                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8738         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8739                  rte_be_to_cpu_32(*key_m) & 0xFF);
8740         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8741                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8742 }
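
     /*
      * Editor's worked example (assumed key): the 32-bit GRE key is split
      * across two PRM fields, e.g. key 0x01020304 with a full mask gives
      *
      *   gre_key_h = 0x01020304 >> 8   = 0x010203  (high 24 bits)
      *   gre_key_l = 0x01020304 & 0xff = 0x04      (low 8 bits)
      */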
8743
8744 /**
8745  * Add GRE item to matcher and to the value.
8746  *
8747  * @param[in, out] matcher
8748  *   Flow matcher.
8749  * @param[in, out] key
8750  *   Flow matcher value.
8751  * @param[in] item
8752  *   Flow pattern to translate.
8753  * @param[in] pattern_flags
8754  *   Accumulated pattern flags.
8755  */
8756 static void
8757 flow_dv_translate_item_gre(void *matcher, void *key,
8758                            const struct rte_flow_item *item,
8759                            uint64_t pattern_flags)
8760 {
8761         static const struct rte_flow_item_gre empty_gre = {0,};
8762         const struct rte_flow_item_gre *gre_m = item->mask;
8763         const struct rte_flow_item_gre *gre_v = item->spec;
8764         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
8765         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8766         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8767         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8768         struct {
8769                 union {
8770                         __extension__
8771                         struct {
8772                                 uint16_t version:3;
8773                                 uint16_t rsvd0:9;
8774                                 uint16_t s_present:1;
8775                                 uint16_t k_present:1;
8776                                 uint16_t rsvd_bit1:1;
8777                                 uint16_t c_present:1;
8778                         };
8779                         uint16_t value;
8780                 };
8781         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8782         uint16_t protocol_m, protocol_v;
8783
8784         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8785         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8786         if (!gre_v) {
8787                 gre_v = &empty_gre;
8788                 gre_m = &empty_gre;
8789         } else {
8790                 if (!gre_m)
8791                         gre_m = &rte_flow_item_gre_mask;
8792         }
8793         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8794         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8795         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8796                  gre_crks_rsvd0_ver_m.c_present);
8797         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8798                  gre_crks_rsvd0_ver_v.c_present &
8799                  gre_crks_rsvd0_ver_m.c_present);
8800         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8801                  gre_crks_rsvd0_ver_m.k_present);
8802         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8803                  gre_crks_rsvd0_ver_v.k_present &
8804                  gre_crks_rsvd0_ver_m.k_present);
8805         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8806                  gre_crks_rsvd0_ver_m.s_present);
8807         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8808                  gre_crks_rsvd0_ver_v.s_present &
8809                  gre_crks_rsvd0_ver_m.s_present);
8810         protocol_m = rte_be_to_cpu_16(gre_m->protocol);
8811         protocol_v = rte_be_to_cpu_16(gre_v->protocol);
8812         if (!protocol_m) {
8813                 /* Force next protocol to prevent matcher duplication. */
8814                 protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
8815                 if (protocol_v)
8816                         protocol_m = 0xFFFF;
8817         }
8818         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
8819         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8820                  protocol_m & protocol_v);
8821 }
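
     /*
      * Editor's illustration of the c_rsvd0_ver decomposition above (a
      * sketch assuming a little-endian host, as the bitfield union does):
      *
      *   c_rsvd0_ver = 0x2000  ->  k_present = 1 (bit 13), C = S = 0
      *   c_rsvd0_ver = 0x8000  ->  c_present = 1 (bit 15)
      *
      * An empty protocol mask is backfilled from pattern_flags so that two
      * flows differing only in the inner layer do not share one matcher.
      */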
8822
8823 /**
8824  * Add NVGRE item to matcher and to the value.
8825  *
8826  * @param[in, out] matcher
8827  *   Flow matcher.
8828  * @param[in, out] key
8829  *   Flow matcher value.
8830  * @param[in] item
8831  *   Flow pattern to translate.
8832  * @param[in] pattern_flags
8833  *   Accumulated pattern flags.
8834  */
8835 static void
8836 flow_dv_translate_item_nvgre(void *matcher, void *key,
8837                              const struct rte_flow_item *item,
8838                              unsigned long pattern_flags)
8839 {
8840         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8841         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8842         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8843         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8844         const char *tni_flow_id_m;
8845         const char *tni_flow_id_v;
8846         char *gre_key_m;
8847         char *gre_key_v;
8848         int size;
8849         int i;
8850
8851         /* For NVGRE, GRE header fields must be set with defined values. */
8852         const struct rte_flow_item_gre gre_spec = {
8853                 .c_rsvd0_ver = RTE_BE16(0x2000),
8854                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8855         };
8856         const struct rte_flow_item_gre gre_mask = {
8857                 .c_rsvd0_ver = RTE_BE16(0xB000),
8858                 .protocol = RTE_BE16(UINT16_MAX),
8859         };
8860         const struct rte_flow_item gre_item = {
8861                 .spec = &gre_spec,
8862                 .mask = &gre_mask,
8863                 .last = NULL,
8864         };
8865         flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags);
8866         if (!nvgre_v)
8867                 return;
8868         if (!nvgre_m)
8869                 nvgre_m = &rte_flow_item_nvgre_mask;
8870         tni_flow_id_m = (const char *)nvgre_m->tni;
8871         tni_flow_id_v = (const char *)nvgre_v->tni;
8872         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8873         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8874         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8875         memcpy(gre_key_m, tni_flow_id_m, size);
8876         for (i = 0; i < size; ++i)
8877                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8878 }
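
     /*
      * Editor's note (illustrative): NVGRE is translated as GRE with spec
      * 0x2000 / mask 0xb000 on c_rsvd0_ver, i.e. K = 1 with C = 0 and
      * S = 0 all verified, and the protocol fixed to 0x6558 (TEB). The
      * 24-bit TNI plus the 8-bit flow_id then fill the 32-bit GRE key,
      * e.g. (assumed values):
      *
      *   tni = {0xab, 0xcd, 0xef}, flow_id = 0x12  ->  key 0xabcdef12
      */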
8879
8880 /**
8881  * Add VXLAN item to matcher and to the value.
8882  *
8883  * @param[in] dev
8884  *   Pointer to the Ethernet device structure.
8885  * @param[in] attr
8886  *   Flow rule attributes.
8887  * @param[in, out] matcher
8888  *   Flow matcher.
8889  * @param[in, out] key
8890  *   Flow matcher value.
8891  * @param[in] item
8892  *   Flow pattern to translate.
8893  * @param[in] inner
8894  *   Item is inner pattern.
8895  */
8896 static void
8897 flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
8898                              const struct rte_flow_attr *attr,
8899                              void *matcher, void *key,
8900                              const struct rte_flow_item *item,
8901                              int inner)
8902 {
8903         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8904         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8905         void *headers_m;
8906         void *headers_v;
8907         void *misc5_m;
8908         void *misc5_v;
8909         uint32_t *tunnel_header_v;
8910         uint32_t *tunnel_header_m;
8911         uint16_t dport;
8912         struct mlx5_priv *priv = dev->data->dev_private;
8913         const struct rte_flow_item_vxlan nic_mask = {
8914                 .vni = "\xff\xff\xff",
8915                 .rsvd1 = 0xff,
8916         };
8917
8918         if (inner) {
8919                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8920                                          inner_headers);
8921                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8922         } else {
8923                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8924                                          outer_headers);
8925                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8926         }
8927         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8928                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8929         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8930                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8931                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8932         }
8933         dport = MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport);
8934         if (!vxlan_v)
8935                 return;
8936         if (!vxlan_m) {
8937                 if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
8938                     (attr->group && !priv->sh->misc5_cap))
8939                         vxlan_m = &rte_flow_item_vxlan_mask;
8940                 else
8941                         vxlan_m = &nic_mask;
8942         }
8943         if ((priv->sh->steering_format_version ==
8944             MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
8945             dport != MLX5_UDP_PORT_VXLAN) ||
8946             (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
8947             ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
8948                 void *misc_m;
8949                 void *misc_v;
8950                 char *vni_m;
8951                 char *vni_v;
8952                 int size;
8953                 int i;
8954                 misc_m = MLX5_ADDR_OF(fte_match_param,
8955                                       matcher, misc_parameters);
8956                 misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8957                 size = sizeof(vxlan_m->vni);
8958                 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8959                 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8960                 memcpy(vni_m, vxlan_m->vni, size);
8961                 for (i = 0; i < size; ++i)
8962                         vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8963                 return;
8964         }
8965         misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
8966         misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
8967         tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8968                                                    misc5_v,
8969                                                    tunnel_header_1);
8970         tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8971                                                    misc5_m,
8972                                                    tunnel_header_1);
8973         *tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
8974                            (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
8975                            (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
8976         if (*tunnel_header_v)
8977                 *tunnel_header_m = vxlan_m->vni[0] |
8978                         vxlan_m->vni[1] << 8 |
8979                         vxlan_m->vni[2] << 16;
8980         else
8981                 *tunnel_header_m = 0x0;
8982         *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
8983         if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
8984                 *tunnel_header_m |= vxlan_m->rsvd1 << 24;
8985 }
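
     /*
      * Editor's worked example for the misc5 path above (assumed VNI):
      * tunnel_header_1 packs the VNI bytes low-to-high with rsvd1 on top:
      *
      *   vni = {0xab, 0xcd, 0xef}, rsvd1 = 0
      *     ->  0xab | 0xcd << 8 | 0xef << 16 = 0x00efcdab
      *
      * The fallback misc path (ConnectX-5 steering format with a
      * non-default UDP port, or missing misc5/tunnel-header capabilities)
      * matches only the 24-bit vxlan_vni field instead.
      */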
8986
8987 /**
8988  * Add VXLAN-GPE item to matcher and to the value.
8989  *
8990  * @param[in, out] matcher
8991  *   Flow matcher.
8992  * @param[in, out] key
8993  *   Flow matcher value.
8994  * @param[in] item
8995  *   Flow pattern to translate.
8996  * @param[in] pattern_flags
8997  *   Accumulated pattern flags.
8998  */
9000 static void
9001 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
9002                                  const struct rte_flow_item *item,
9003                                  const uint64_t pattern_flags)
9004 {
9005         static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
9006         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
9007         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
9008         /* The item was validated to be on the outer side */
9009         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9010         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9011         void *misc_m =
9012                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
9013         void *misc_v =
9014                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9015         char *vni_m =
9016                 MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
9017         char *vni_v =
9018                 MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
9019         int i, size = sizeof(vxlan_m->vni);
9020         uint8_t flags_m = 0xff;
9021         uint8_t flags_v = 0xc;
9022         uint8_t m_protocol, v_protocol;
9023
9024         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9025                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9026                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9027                          MLX5_UDP_PORT_VXLAN_GPE);
9028         }
9029         if (!vxlan_v) {
9030                 vxlan_v = &dummy_vxlan_gpe_hdr;
9031                 vxlan_m = &dummy_vxlan_gpe_hdr;
9032         } else {
9033                 if (!vxlan_m)
9034                         vxlan_m = &rte_flow_item_vxlan_gpe_mask;
9035         }
9036         memcpy(vni_m, vxlan_m->vni, size);
9037         for (i = 0; i < size; ++i)
9038                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
9039         if (vxlan_m->flags) {
9040                 flags_m = vxlan_m->flags;
9041                 flags_v = vxlan_v->flags;
9042         }
9043         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
9044         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
9045         m_protocol = vxlan_m->protocol;
9046         v_protocol = vxlan_v->protocol;
9047         if (!m_protocol) {
9048                 /* Force next protocol to ensure the inner headers are parsed. */
9049                 if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
9050                         v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
9051                 else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
9052                         v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
9053                 else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
9054                         v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
9055                 if (v_protocol)
9056                         m_protocol = 0xFF;
9057         }
9058         MLX5_SET(fte_match_set_misc3, misc_m,
9059                  outer_vxlan_gpe_next_protocol, m_protocol);
9060         MLX5_SET(fte_match_set_misc3, misc_v,
9061                  outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
9062 }
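
     /*
      * Editor's note (illustrative): when no flags are requested the value
      * defaults to 0x0c, which should correspond to the I (VNI valid) and
      * P (next protocol) bits of the VXLAN-GPE flags byte. An empty
      * protocol mask is backfilled from the inner layer, e.g. an inner
      * IPv4 item forces next_protocol 0x1 with a full 0xff mask.
      */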
9063
9064 /**
9065  * Add Geneve item to matcher and to the value.
9066  *
9067  * @param[in, out] matcher
9068  *   Flow matcher.
9069  * @param[in, out] key
9070  *   Flow matcher value.
9071  * @param[in] item
9072  *   Flow pattern to translate.
9073  * @param[in] pattern_flags
9074  *   Accumulated pattern flags.
9075  */
9077 static void
9078 flow_dv_translate_item_geneve(void *matcher, void *key,
9079                               const struct rte_flow_item *item,
9080                               uint64_t pattern_flags)
9081 {
9082         static const struct rte_flow_item_geneve empty_geneve = {0,};
9083         const struct rte_flow_item_geneve *geneve_m = item->mask;
9084         const struct rte_flow_item_geneve *geneve_v = item->spec;
9085         /* GENEVE flow item validation allows single tunnel item */
9086         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9087         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9088         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9089         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9090         uint16_t gbhdr_m;
9091         uint16_t gbhdr_v;
9092         char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
9093         char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
9094         size_t size = sizeof(geneve_m->vni), i;
9095         uint16_t protocol_m, protocol_v;
9096
9097         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9098                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9099                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9100                          MLX5_UDP_PORT_GENEVE);
9101         }
9102         if (!geneve_v) {
9103                 geneve_v = &empty_geneve;
9104                 geneve_m = &empty_geneve;
9105         } else {
9106                 if (!geneve_m)
9107                         geneve_m = &rte_flow_item_geneve_mask;
9108         }
9109         memcpy(vni_m, geneve_m->vni, size);
9110         for (i = 0; i < size; ++i)
9111                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
9112         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
9113         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
9114         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
9115                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9116         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
9117                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9118         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9119                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9120         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9121                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
9122                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9123         protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
9124         protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
9125         if (!protocol_m) {
9126                 /* Force next protocol to prevent matcher duplication. */
9127                 protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
9128                 if (protocol_v)
9129                         protocol_m = 0xFFFF;
9130         }
9131         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
9132         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
9133                  protocol_m & protocol_v);
9134 }
9135
9136 /**
9137  * Create Geneve TLV option resource.
9138  *
9139  * @param[in, out] dev
9140  *   Pointer to rte_eth_dev structure.
9141  * @param[in] item
9142  *   Flow pattern to translate, holding the GENEVE TLV option.
9143  * @param[out] error
9144  *   Pointer to error structure.
9147  *
9148  * @return
9149  *   0 on success, a negative errno value otherwise.
9150  */
9152 int
9153 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
9154                                              const struct rte_flow_item *item,
9155                                              struct rte_flow_error *error)
9156 {
9157         struct mlx5_priv *priv = dev->data->dev_private;
9158         struct mlx5_dev_ctx_shared *sh = priv->sh;
9159         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
9160                         sh->geneve_tlv_option_resource;
9161         struct mlx5_devx_obj *obj;
9162         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9163         int ret = 0;
9164
9165         if (!geneve_opt_v)
9166                 return -1;
9167         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
9168         if (geneve_opt_resource != NULL) {
9169                 if (geneve_opt_resource->option_class ==
9170                         geneve_opt_v->option_class &&
9171                         geneve_opt_resource->option_type ==
9172                         geneve_opt_v->option_type &&
9173                         geneve_opt_resource->length ==
9174                         geneve_opt_v->option_len) {
9175                         /* We already have GENEVE TLV option obj allocated. */
9176                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
9177                                            __ATOMIC_RELAXED);
9178                 } else {
9179                         ret = rte_flow_error_set(error, ENOMEM,
9180                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9181                                 "Only one GENEVE TLV option supported");
9182                         goto exit;
9183                 }
9184         } else {
9185                 /* Create a GENEVE TLV object and resource. */
9186                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->cdev->ctx,
9187                                 geneve_opt_v->option_class,
9188                                 geneve_opt_v->option_type,
9189                                 geneve_opt_v->option_len);
9190                 if (!obj) {
9191                         ret = rte_flow_error_set(error, ENODATA,
9192                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9193                                 "Failed to create GENEVE TLV Devx object");
9194                         goto exit;
9195                 }
9196                 sh->geneve_tlv_option_resource =
9197                                 mlx5_malloc(MLX5_MEM_ZERO,
9198                                                 sizeof(*geneve_opt_resource),
9199                                                 0, SOCKET_ID_ANY);
9200                 if (!sh->geneve_tlv_option_resource) {
9201                         claim_zero(mlx5_devx_cmd_destroy(obj));
9202                         ret = rte_flow_error_set(error, ENOMEM,
9203                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9204                                 "GENEVE TLV object memory allocation failed");
9205                         goto exit;
9206                 }
9207                 geneve_opt_resource = sh->geneve_tlv_option_resource;
9208                 geneve_opt_resource->obj = obj;
9209                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
9210                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
9211                 geneve_opt_resource->length = geneve_opt_v->option_len;
9212                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
9213                                 __ATOMIC_RELAXED);
9214         }
9215 exit:
9216         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
9217         return ret;
9218 }
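
     /*
      * Editor's usage note (a sketch with assumed values): a single GENEVE
      * TLV option object is shared per device context. Registering the
      * same (class, type, length) tuple again only bumps the refcount,
      * while a different tuple fails:
      *
      *   register(class 0x103, type 1, len 4)  ->  object created, refcnt 1
      *   register(class 0x103, type 1, len 4)  ->  refcnt 2
      *   register(class 0x103, type 2, len 4)  ->  ENOMEM, "Only one
      *                                             GENEVE TLV option..."
      */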
9219
9220 /**
9221  * Add Geneve TLV option item to matcher.
9222  *
9223  * @param[in, out] dev
9224  *   Pointer to rte_eth_dev structure.
9225  * @param[in, out] matcher
9226  *   Flow matcher.
9227  * @param[in, out] key
9228  *   Flow matcher value.
9229  * @param[in] item
9230  *   Flow pattern to translate.
9231  * @param[out] error
9232  *   Pointer to error structure.
      *
      * @return
      *   0 on success, a negative errno value otherwise.
9233  */
9234 static int
9235 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9236                                   void *key, const struct rte_flow_item *item,
9237                                   struct rte_flow_error *error)
9238 {
9239         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9240         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9241         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9242         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9243         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9244                         misc_parameters_3);
9245         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9246         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9247         int ret = 0;
9248
9249         if (!geneve_opt_v)
9250                 return -1;
9251         if (!geneve_opt_m)
9252                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9253         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9254                                                            error);
9255         if (ret) {
9256                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9257                 return ret;
9258         }
9259         /*
9260          * Set the option length in GENEVE header if not requested.
9261          * The GENEVE TLV option length is expressed by the option length field
9262          * in the GENEVE header.
9263          * If the option length was not requested but the GENEVE TLV option item
9264          * is present, we set the option length field implicitly.
9265          */
9266         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9267                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9268                          MLX5_GENEVE_OPTLEN_MASK);
9269                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9270                          geneve_opt_v->option_len + 1);
9271         }
9272         MLX5_SET(fte_match_set_misc, misc_m, geneve_tlv_option_0_exist, 1);
9273         MLX5_SET(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist, 1);
9274         /* Set the data. */
9275         if (geneve_opt_v->data) {
9276                 memcpy(&opt_data_key, geneve_opt_v->data,
9277                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9278                                 sizeof(opt_data_key)));
9279                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9280                                 sizeof(opt_data_key));
9281                 memcpy(&opt_data_mask, geneve_opt_m->data,
9282                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9283                                 sizeof(opt_data_mask)));
9284                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9285                                 sizeof(opt_data_mask));
9286                 MLX5_SET(fte_match_set_misc3, misc3_m,
9287                                 geneve_tlv_option_0_data,
9288                                 rte_be_to_cpu_32(opt_data_mask));
9289                 MLX5_SET(fte_match_set_misc3, misc3_v,
9290                                 geneve_tlv_option_0_data,
9291                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9292         }
9293         return ret;
9294 }
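
/*
 * Usage sketch for this translator (illustrative only, all field values
 * are hypothetical; option_len counts 4-byte words of option data):
 *
 *     uint32_t opt_data = rte_cpu_to_be_32(0xdeadbeef);
 *     struct rte_flow_item_geneve_opt opt = {
 *             .option_class = rte_cpu_to_be_16(0x0102),
 *             .option_type = 0x11,
 *             .option_len = 1,
 *             .data = &opt_data,
 *     };
 *
 * With no mask supplied, rte_flow_item_geneve_opt_mask is used, and the
 * GENEVE header option length is then matched implicitly as
 * option_len + 1 by the code above.
 */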
9295
9296 /**
9297  * Add MPLS item to matcher and to the value.
9298  *
9299  * @param[in, out] matcher
9300  *   Flow matcher.
9301  * @param[in, out] key
9302  *   Flow matcher value.
9303  * @param[in] item
9304  *   Flow pattern to translate.
9305  * @param[in] prev_layer
9306  *   The protocol layer indicated in previous item.
9307  * @param[in] inner
9308  *   Item is inner pattern.
9309  */
9310 static void
9311 flow_dv_translate_item_mpls(void *matcher, void *key,
9312                             const struct rte_flow_item *item,
9313                             uint64_t prev_layer,
9314                             int inner)
9315 {
9316         const uint32_t *in_mpls_m = item->mask;
9317         const uint32_t *in_mpls_v = item->spec;
9318         uint32_t *out_mpls_m = NULL;
9319         uint32_t *out_mpls_v = NULL;
9320         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9321         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9322         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9323                                      misc_parameters_2);
9324         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9325         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9326         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9327
9328         switch (prev_layer) {
9329         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9330                 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9331                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
9332                                  0xffff);
9333                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9334                                  MLX5_UDP_PORT_MPLS);
9335                 }
9336                 break;
9337         case MLX5_FLOW_LAYER_GRE:
9338                 /* Fall-through. */
9339         case MLX5_FLOW_LAYER_GRE_KEY:
9340                 if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
9341                         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
9342                                  0xffff);
9343                         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9344                                  RTE_ETHER_TYPE_MPLS);
9345                 }
9346                 break;
9347         default:
9348                 break;
9349         }
9350         if (!in_mpls_v)
9351                 return;
9352         if (!in_mpls_m)
9353                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9354         switch (prev_layer) {
9355         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9356                 out_mpls_m =
9357                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9358                                                  outer_first_mpls_over_udp);
9359                 out_mpls_v =
9360                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9361                                                  outer_first_mpls_over_udp);
9362                 break;
9363         case MLX5_FLOW_LAYER_GRE:
9364                 out_mpls_m =
9365                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9366                                                  outer_first_mpls_over_gre);
9367                 out_mpls_v =
9368                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9369                                                  outer_first_mpls_over_gre);
9370                 break;
9371         default:
9372                 /* Inner MPLS not over GRE is not supported. */
9373                 if (!inner) {
9374                         out_mpls_m =
9375                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9376                                                          misc2_m,
9377                                                          outer_first_mpls);
9378                         out_mpls_v =
9379                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9380                                                          misc2_v,
9381                                                          outer_first_mpls);
9382                 }
9383                 break;
9384         }
9385         if (out_mpls_m && out_mpls_v) {
9386                 *out_mpls_m = *in_mpls_m;
9387                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9388         }
9389 }
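
/*
 * A sketch of the item ordering this translator assumes, e.g. for
 * MPLS-over-UDP (prev_layer == MLX5_FLOW_LAYER_OUTER_L4_UDP):
 *
 *     ETH / IPV4 / UDP / MPLS / END
 *
 * If the UDP destination port was left unspecified by the pattern, it
 * is matched implicitly as MLX5_UDP_PORT_MPLS above; likewise the GRE
 * protocol defaults to RTE_ETHER_TYPE_MPLS for MPLS-over-GRE.
 */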
9390
9391 /**
9392  * Add metadata register item to matcher.
9393  *
9394  * @param[in, out] matcher
9395  *   Flow matcher.
9396  * @param[in, out] key
9397  *   Flow matcher value.
9398  * @param[in] reg_type
9399  *   Type of device metadata register.
9400  * @param[in] data
9401  *   Register value.
9402  * @param[in] mask
9403  *   Register mask.
9404  */
9405 static void
9406 flow_dv_match_meta_reg(void *matcher, void *key,
9407                        enum modify_reg reg_type,
9408                        uint32_t data, uint32_t mask)
9409 {
9410         void *misc2_m =
9411                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9412         void *misc2_v =
9413                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9414         uint32_t temp;
9415
9416         data &= mask;
9417         switch (reg_type) {
9418         case REG_A:
9419                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9420                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9421                 break;
9422         case REG_B:
9423                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9424                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9425                 break;
9426         case REG_C_0:
9427                 /*
9428                  * The metadata register C0 field might be divided into
9429                  * source vport index and META item value, we should set
9430                  * this field according to specified mask, not as whole one.
9431                  */
9432                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9433                 temp |= mask;
9434                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9435                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9436                 temp &= ~mask;
9437                 temp |= data;
9438                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9439                 break;
9440         case REG_C_1:
9441                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9442                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9443                 break;
9444         case REG_C_2:
9445                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9446                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9447                 break;
9448         case REG_C_3:
9449                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9450                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9451                 break;
9452         case REG_C_4:
9453                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9454                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9455                 break;
9456         case REG_C_5:
9457                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9458                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9459                 break;
9460         case REG_C_6:
9461                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9462                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9463                 break;
9464         case REG_C_7:
9465                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9466                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9467                 break;
9468         default:
9469                 MLX5_ASSERT(false);
9470                 break;
9471         }
9472 }
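
/*
 * Worked example for the REG_C_0 read-modify-write above (hypothetical
 * values): if the matcher already holds mask 0xffff0000 for the source
 * vport part and a request arrives with mask 0x0000ffff, data 0x1234,
 * the merged matcher mask becomes 0xffffffff while the value keeps its
 * vport bits and gains 0x1234 in the lower half.
 */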
9473
9474 /**
9475  * Add MARK item to matcher.
9476  *
9477  * @param[in] dev
9478  *   The device to configure through.
9479  * @param[in, out] matcher
9480  *   Flow matcher.
9481  * @param[in, out] key
9482  *   Flow matcher value.
9483  * @param[in] item
9484  *   Flow pattern to translate.
9485  */
9486 static void
9487 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9488                             void *matcher, void *key,
9489                             const struct rte_flow_item *item)
9490 {
9491         struct mlx5_priv *priv = dev->data->dev_private;
9492         const struct rte_flow_item_mark *mark;
9493         uint32_t value;
9494         uint32_t mask;
9495
9496         mark = item->mask ? (const void *)item->mask :
9497                             &rte_flow_item_mark_mask;
9498         mask = mark->id & priv->sh->dv_mark_mask;
9499         mark = (const void *)item->spec;
9500         MLX5_ASSERT(mark);
9501         value = mark->id & priv->sh->dv_mark_mask & mask;
9502         if (mask) {
9503                 enum modify_reg reg;
9504
9505                 /* Get the metadata register index for the mark. */
9506                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9507                 MLX5_ASSERT(reg > 0);
9508                 if (reg == REG_C_0) {
9509                         struct mlx5_priv *priv = dev->data->dev_private;
9510                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9511                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9512
9513                         mask &= msk_c0;
9514                         mask <<= shl_c0;
9515                         value <<= shl_c0;
9516                 }
9517                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9518         }
9519 }
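
/*
 * Note on the REG_C_0 case above: rte_bsf32() returns the index of the
 * least significant set bit, so with a hypothetical dv_regc0_mask of
 * 0xffff0000 the shift is 16 and the MARK value lands in the upper half
 * of REG_C_0, the half not used for source vport metadata.
 */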
9520
9521 /**
9522  * Add META item to matcher.
9523  *
9524  * @param[in] dev
9525  *   The device to configure through.
9526  * @param[in, out] matcher
9527  *   Flow matcher.
9528  * @param[in, out] key
9529  *   Flow matcher value.
9530  * @param[in] attr
9531  *   Attributes of flow that includes this item.
9532  * @param[in] item
9533  *   Flow pattern to translate.
9534  */
9535 static void
9536 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9537                             void *matcher, void *key,
9538                             const struct rte_flow_attr *attr,
9539                             const struct rte_flow_item *item)
9540 {
9541         const struct rte_flow_item_meta *meta_m;
9542         const struct rte_flow_item_meta *meta_v;
9543
9544         meta_m = (const void *)item->mask;
9545         if (!meta_m)
9546                 meta_m = &rte_flow_item_meta_mask;
9547         meta_v = (const void *)item->spec;
9548         if (meta_v) {
9549                 int reg;
9550                 uint32_t value = meta_v->data;
9551                 uint32_t mask = meta_m->data;
9552
9553                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9554                 if (reg < 0)
9555                         return;
9556                 MLX5_ASSERT(reg != REG_NON);
9557                 if (reg == REG_C_0) {
9558                         struct mlx5_priv *priv = dev->data->dev_private;
9559                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9560                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9561
9562                         mask &= msk_c0;
9563                         mask <<= shl_c0;
9564                         value <<= shl_c0;
9565                 }
9566                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9567         }
9568 }
9569
9570 /**
9571  * Add vport metadata Reg C0 item to matcher.
9572  *
9573  * @param[in, out] matcher
9574  *   Flow matcher.
9575  * @param[in, out] key
9576  *   Flow matcher value.
9577  * @param[in] value
9578  *   Register value to match.
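 * @param[in] mask
 *   Register mask.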
9579  */
9580 static void
9581 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9582                                   uint32_t value, uint32_t mask)
9583 {
9584         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9585 }
9586
9587 /**
9588  * Add tag item to matcher.
9589  *
9590  * @param[in] dev
9591  *   The device to configure through.
9592  * @param[in, out] matcher
9593  *   Flow matcher.
9594  * @param[in, out] key
9595  *   Flow matcher value.
9596  * @param[in] item
9597  *   Flow pattern to translate.
9598  */
9599 static void
9600 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9601                                 void *matcher, void *key,
9602                                 const struct rte_flow_item *item)
9603 {
9604         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9605         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9606         uint32_t mask, value;
9607
9608         MLX5_ASSERT(tag_v);
9609         value = tag_v->data;
9610         mask = tag_m ? tag_m->data : UINT32_MAX;
9611         if (tag_v->id == REG_C_0) {
9612                 struct mlx5_priv *priv = dev->data->dev_private;
9613                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9614                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9615
9616                 mask &= msk_c0;
9617                 mask <<= shl_c0;
9618                 value <<= shl_c0;
9619         }
9620         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9621 }
9622
9623 /**
9624  * Add TAG item to matcher.
9625  *
9626  * @param[in] dev
9627  *   The device to configure through.
9628  * @param[in, out] matcher
9629  *   Flow matcher.
9630  * @param[in, out] key
9631  *   Flow matcher value.
9632  * @param[in] item
9633  *   Flow pattern to translate.
9634  */
9635 static void
9636 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9637                            void *matcher, void *key,
9638                            const struct rte_flow_item *item)
9639 {
9640         const struct rte_flow_item_tag *tag_v = item->spec;
9641         const struct rte_flow_item_tag *tag_m = item->mask;
9642         enum modify_reg reg;
9643
9644         MLX5_ASSERT(tag_v);
9645         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9646         /* Get the metadata register index for the tag. */
9647         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9648         MLX5_ASSERT(reg > 0);
9649         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9650 }
9651
9652 /**
9653  * Add source vport match to the specified matcher.
9654  *
9655  * @param[in, out] matcher
9656  *   Flow matcher.
9657  * @param[in, out] key
9658  *   Flow matcher value.
9659  * @param[in] port
9660  *   Source vport value to match.
9661  * @param[in] mask
9662  *   Mask to apply.
9663  */
9664 static void
9665 flow_dv_translate_item_source_vport(void *matcher, void *key,
9666                                     int16_t port, uint16_t mask)
9667 {
9668         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9669         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9670
9671         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9672         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9673 }
9674
9675 /**
9676  * Translate port-id item to eswitch match on port-id.
9677  *
9678  * @param[in] dev
9679  *   The device to configure through.
9680  * @param[in, out] matcher
9681  *   Flow matcher.
9682  * @param[in, out] key
9683  *   Flow matcher value.
9684  * @param[in] item
9685  *   Flow pattern to translate.
9686  * @param[in] attr
9687  *   Flow attributes.
9688  *
9689  * @return
9690  *   0 on success, a negative errno value otherwise.
9691  */
9692 static int
9693 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9694                                void *key, const struct rte_flow_item *item,
9695                                const struct rte_flow_attr *attr)
9696 {
9697         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9698         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9699         struct mlx5_priv *priv;
9700         uint16_t mask, id;
9701
9702         if (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) {
9703                 flow_dv_translate_item_source_vport(matcher, key,
9704                         flow_dv_get_esw_manager_vport_id(dev), 0xffff);
9705                 return 0;
9706         }
9707         mask = pid_m ? pid_m->id : 0xffff;
9708         id = pid_v ? pid_v->id : dev->data->port_id;
9709         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9710         if (!priv)
9711                 return -rte_errno;
9712         /*
9713          * Translate to vport field or to metadata, depending on mode.
9714          * Kernel can use either misc.source_port or half of C0 metadata
9715          * register.
9716          */
9717         if (priv->vport_meta_mask) {
9718                 /*
9719                  * Provide the hint for SW steering library
9720                  * to insert the flow into ingress domain and
9721                  * save the extra vport match.
9722                  */
9723                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9724                     priv->pf_bond < 0 && attr->transfer)
9725                         flow_dv_translate_item_source_vport
9726                                 (matcher, key, priv->vport_id, mask);
9727                 /*
9728                  * We should always set the vport metadata register,
9729                  * otherwise the SW steering library can drop
9730                  * the rule if wire vport metadata value is not zero,
9731                  * it depends on kernel configuration.
9732                  */
9733                 flow_dv_translate_item_meta_vport(matcher, key,
9734                                                   priv->vport_meta_tag,
9735                                                   priv->vport_meta_mask);
9736         } else {
9737                 flow_dv_translate_item_source_vport(matcher, key,
9738                                                     priv->vport_id, mask);
9739         }
9740         return 0;
9741 }
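
/*
 * Usage sketch (illustrative): a transfer rule matching traffic coming
 * from DPDK port 1 would carry
 *
 *     struct rte_flow_item_port_id pid = { .id = 1 };
 *
 * and is translated here either into a source vport match or into a
 * REG_C_0 vport metadata match, depending on the steering mode.
 */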
9742
9743 /**
9744  * Add ICMP6 item to matcher and to the value.
9745  *
9746  * @param[in, out] matcher
9747  *   Flow matcher.
9748  * @param[in, out] key
9749  *   Flow matcher value.
9750  * @param[in] item
9751  *   Flow pattern to translate.
9752  * @param[in] inner
9753  *   Item is inner pattern.
9754  */
9755 static void
9756 flow_dv_translate_item_icmp6(void *matcher, void *key,
9757                               const struct rte_flow_item *item,
9758                               int inner)
9759 {
9760         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9761         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9762         void *headers_m;
9763         void *headers_v;
9764         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9765                                      misc_parameters_3);
9766         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9767         if (inner) {
9768                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9769                                          inner_headers);
9770                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9771         } else {
9772                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9773                                          outer_headers);
9774                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9775         }
9776         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9777         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9778         if (!icmp6_v)
9779                 return;
9780         if (!icmp6_m)
9781                 icmp6_m = &rte_flow_item_icmp6_mask;
9782         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9783         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9784                  icmp6_v->type & icmp6_m->type);
9785         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9786         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9787                  icmp6_v->code & icmp6_m->code);
9788 }
9789
9790 /**
9791  * Add ICMP item to matcher and to the value.
9792  *
9793  * @param[in, out] matcher
9794  *   Flow matcher.
9795  * @param[in, out] key
9796  *   Flow matcher value.
9797  * @param[in] item
9798  *   Flow pattern to translate.
9799  * @param[in] inner
9800  *   Item is inner pattern.
9801  */
9802 static void
9803 flow_dv_translate_item_icmp(void *matcher, void *key,
9804                             const struct rte_flow_item *item,
9805                             int inner)
9806 {
9807         const struct rte_flow_item_icmp *icmp_m = item->mask;
9808         const struct rte_flow_item_icmp *icmp_v = item->spec;
9809         uint32_t icmp_header_data_m = 0;
9810         uint32_t icmp_header_data_v = 0;
9811         void *headers_m;
9812         void *headers_v;
9813         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9814                                      misc_parameters_3);
9815         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9816         if (inner) {
9817                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9818                                          inner_headers);
9819                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9820         } else {
9821                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9822                                          outer_headers);
9823                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9824         }
9825         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9826         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9827         if (!icmp_v)
9828                 return;
9829         if (!icmp_m)
9830                 icmp_m = &rte_flow_item_icmp_mask;
9831         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9832                  icmp_m->hdr.icmp_type);
9833         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9834                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9835         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9836                  icmp_m->hdr.icmp_code);
9837         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9838                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9839         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9840         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9841         if (icmp_header_data_m) {
9842                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9843                 icmp_header_data_v |=
9844                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9845                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9846                          icmp_header_data_m);
9847                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9848                          icmp_header_data_v & icmp_header_data_m);
9849         }
9850 }
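
/*
 * Worked example for the header data dword above (hypothetical values):
 * ident 0x0001 and sequence number 0x0002 are packed as
 * (0x0001 << 16) | 0x0002 == 0x00010002 before being written into
 * icmp_header_data.
 */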
9851
9852 /**
9853  * Add GTP item to matcher and to the value.
9854  *
9855  * @param[in, out] matcher
9856  *   Flow matcher.
9857  * @param[in, out] key
9858  *   Flow matcher value.
9859  * @param[in] item
9860  *   Flow pattern to translate.
9861  * @param[in] inner
9862  *   Item is inner pattern.
9863  */
9864 static void
9865 flow_dv_translate_item_gtp(void *matcher, void *key,
9866                            const struct rte_flow_item *item, int inner)
9867 {
9868         const struct rte_flow_item_gtp *gtp_m = item->mask;
9869         const struct rte_flow_item_gtp *gtp_v = item->spec;
9870         void *headers_m;
9871         void *headers_v;
9872         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9873                                      misc_parameters_3);
9874         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9875         uint16_t dport = RTE_GTPU_UDP_PORT;
9876
9877         if (inner) {
9878                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9879                                          inner_headers);
9880                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9881         } else {
9882                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9883                                          outer_headers);
9884                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9885         }
9886         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9887                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9888                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9889         }
9890         if (!gtp_v)
9891                 return;
9892         if (!gtp_m)
9893                 gtp_m = &rte_flow_item_gtp_mask;
9894         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9895                  gtp_m->v_pt_rsv_flags);
9896         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9897                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9898         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9899         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9900                  gtp_v->msg_type & gtp_m->msg_type);
9901         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9902                  rte_be_to_cpu_32(gtp_m->teid));
9903         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9904                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9905 }
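
/*
 * Usage sketch (illustrative, TEID value is hypothetical):
 *
 *     struct rte_flow_item_gtp gtp = {
 *             .teid = rte_cpu_to_be_32(1234),
 *             .msg_type = 0xff,
 *     };
 *
 * When the pattern does not match the UDP destination port explicitly,
 * it is set implicitly to RTE_GTPU_UDP_PORT above.
 */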
9906
9907 /**
9908  * Add GTP PSC item to matcher.
9909  *
9910  * @param[in, out] matcher
9911  *   Flow matcher.
9912  * @param[in, out] key
9913  *   Flow matcher value.
9914  * @param[in] item
9915  *   Flow pattern to translate.
9916  */
9917 static int
9918 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9919                                const struct rte_flow_item *item)
9920 {
9921         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9922         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9923         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9924                         misc_parameters_3);
9925         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9926         union {
9927                 uint32_t w32;
9928                 struct {
9929                         uint16_t seq_num;
9930                         uint8_t npdu_num;
9931                         uint8_t next_ext_header_type;
9932                 };
9933         } dw_2;
9934         uint8_t gtp_flags;
9935
9936         /* Always set E-flag match to one, regardless of GTP item settings. */
9937         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9938         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9939         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9940         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9941         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9942         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9943         /* Set next extension header type. */
9944         dw_2.seq_num = 0;
9945         dw_2.npdu_num = 0;
9946         dw_2.next_ext_header_type = 0xff;
9947         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9948                  rte_cpu_to_be_32(dw_2.w32));
9949         dw_2.seq_num = 0;
9950         dw_2.npdu_num = 0;
9951         dw_2.next_ext_header_type = 0x85;
9952         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9953                  rte_cpu_to_be_32(dw_2.w32));
9954         if (gtp_psc_v) {
9955                 union {
9956                         uint32_t w32;
9957                         struct {
9958                                 uint8_t len;
9959                                 uint8_t type_flags;
9960                                 uint8_t qfi;
9961                                 uint8_t reserved;
9962                         };
9963                 } dw_0;
9964
9965                 /* Set extension header PDU type and QoS. */
9966                 if (!gtp_psc_m)
9967                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9968                 dw_0.w32 = 0;
9969                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->hdr.type);
9970                 dw_0.qfi = gtp_psc_m->hdr.qfi;
9971                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9972                          rte_cpu_to_be_32(dw_0.w32));
9973                 dw_0.w32 = 0;
9974                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->hdr.type &
9975                                                         gtp_psc_m->hdr.type);
9976                 dw_0.qfi = gtp_psc_v->hdr.qfi & gtp_psc_m->hdr.qfi;
9977                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9978                          rte_cpu_to_be_32(dw_0.w32));
9979         }
9980         return 0;
9981 }
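
/*
 * Note: 0x85 is the GTP-U next-extension-header type of the PDU Session
 * Container, so the dword-2 value above pins the first extension header
 * to that type, while the E-flag match ensures an extension header is
 * present at all.
 */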
9982
9983 /**
9984  * Add eCPRI item to matcher and to the value.
9985  *
9986  * @param[in] dev
9987  *   The device to configure through.
9988  * @param[in, out] matcher
9989  *   Flow matcher.
9990  * @param[in, out] key
9991  *   Flow matcher value.
9992  * @param[in] item
9993  *   Flow pattern to translate.
9994  * @param[in] last_item
9995  *   Last item flags.
9996  */
9997 static void
9998 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9999                              void *key, const struct rte_flow_item *item,
10000                              uint64_t last_item)
10001 {
10002         struct mlx5_priv *priv = dev->data->dev_private;
10003         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
10004         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
10005         struct rte_ecpri_common_hdr common;
10006         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
10007                                      misc_parameters_4);
10008         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
10009         uint32_t *samples;
10010         void *dw_m;
10011         void *dw_v;
10012
10013         /*
10014          * In case of eCPRI over Ethernet, if EtherType is not specified,
10015          * match on eCPRI EtherType implicitly.
10016          */
10017         if (last_item & MLX5_FLOW_LAYER_OUTER_L2) {
10018                 void *hdrs_m, *hdrs_v, *l2m, *l2v;
10019
10020                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
10021                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
10022                 l2m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, ethertype);
10023                 l2v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
10024                 if (*(uint16_t *)l2m == 0 && *(uint16_t *)l2v == 0) {
10025                         *(uint16_t *)l2m = UINT16_MAX;
10026                         *(uint16_t *)l2v = RTE_BE16(RTE_ETHER_TYPE_ECPRI);
10027                 }
10028         }
10029         if (!ecpri_v)
10030                 return;
10031         if (!ecpri_m)
10032                 ecpri_m = &rte_flow_item_ecpri_mask;
10033         /*
10034          * At most four DW samples are supported in a single matcher now.
10035          * Two are used for eCPRI matching:
10036          * 1. Type: one byte, mask should be 0x00ff0000 in network order.
10037          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
10038          *    if any.
10039          */
10040         if (!ecpri_m->hdr.common.u32)
10041                 return;
10042         samples = priv->sh->ecpri_parser.ids;
10043         /* Need to take the whole DW as the mask to fill the entry. */
10044         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10045                             prog_sample_field_value_0);
10046         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10047                             prog_sample_field_value_0);
10048         /* Already big endian (network order) in the header. */
10049         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
10050         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
10051         /* Sample#0, used for matching type, offset 0. */
10052         MLX5_SET(fte_match_set_misc4, misc4_m,
10053                  prog_sample_field_id_0, samples[0]);
10054         /* It makes no sense to set the sample ID in the mask field. */
10055         MLX5_SET(fte_match_set_misc4, misc4_v,
10056                  prog_sample_field_id_0, samples[0]);
10057         /*
10058          * Check whether the message body part needs to be matched.
10059          * Wildcard rules matching only the type field should be supported.
10060          */
10061         if (ecpri_m->hdr.dummy[0]) {
10062                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
10063                 switch (common.type) {
10064                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
10065                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
10066                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
10067                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10068                                             prog_sample_field_value_1);
10069                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10070                                             prog_sample_field_value_1);
10071                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
10072                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
10073                                             ecpri_m->hdr.dummy[0];
10074                         /* Sample#1, to match message body, offset 4. */
10075                         MLX5_SET(fte_match_set_misc4, misc4_m,
10076                                  prog_sample_field_id_1, samples[1]);
10077                         MLX5_SET(fte_match_set_misc4, misc4_v,
10078                                  prog_sample_field_id_1, samples[1]);
10079                         break;
10080                 default:
10081                         /* Others, do not match any sample ID. */
10082                         break;
10083                 }
10084         }
10085 }
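
/*
 * Usage sketch (illustrative, IDs are hypothetical): matching IQ data
 * messages of physical channel 5 could use
 *
 *     struct rte_flow_item_ecpri ecpri = {
 *             .hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA,
 *             .hdr.type0.pc_id = rte_cpu_to_be_16(5),
 *     };
 *
 * The message type lands in programmable sample #0 and the message body
 * in sample #1, as wired above.
 */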
10086
10087 /**
10088  * Add connection tracking status item to matcher.
10089  *
10090  * @param[in] dev
10091  *   The device to configure through.
10092  * @param[in, out] matcher
10093  *   Flow matcher.
10094  * @param[in, out] key
10095  *   Flow matcher value.
10096  * @param[in] item
10097  *   Flow pattern to translate.
10098  */
10099 static void
10100 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
10101                               void *matcher, void *key,
10102                               const struct rte_flow_item *item)
10103 {
10104         uint32_t reg_value = 0;
10105         int reg_id;
10106         /* The 8 LSB follow the pattern 0b11/0000/11; the middle 4 bits are reserved. */
10107         uint32_t reg_mask = 0;
10108         const struct rte_flow_item_conntrack *spec = item->spec;
10109         const struct rte_flow_item_conntrack *mask = item->mask;
10110         uint32_t flags;
10111         struct rte_flow_error error;
10112
10113         if (!mask)
10114                 mask = &rte_flow_item_conntrack_mask;
10115         if (!spec || !mask->flags)
10116                 return;
10117         flags = spec->flags & mask->flags;
10118         /* The conflict should be checked in the validation. */
10119         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
10120                 reg_value |= MLX5_CT_SYNDROME_VALID;
10121         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10122                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
10123         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
10124                 reg_value |= MLX5_CT_SYNDROME_INVALID;
10125         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
10126                 reg_value |= MLX5_CT_SYNDROME_TRAP;
10127         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10128                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
10129         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
10130                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
10131                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
10132                 reg_mask |= 0xc0;
10133         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10134                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
10135         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10136                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
10137         /* The REG_C_x value could be saved during startup. */
10138         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
10139         if (reg_id == REG_NON)
10140                 return;
10141         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
10142                                reg_value, reg_mask);
10143 }
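
/*
 * Usage sketch (illustrative): matching packets that connection
 * tracking classified as valid could use
 *
 *     struct rte_flow_item_conntrack ct = {
 *             .flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
 *     };
 *
 * which is folded here into a syndrome match on the REG_C_x register
 * reserved for ASO connection tracking.
 */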
10144
10145 static void
10146 flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
10147                             const struct rte_flow_item *item,
10148                             struct mlx5_flow *dev_flow, bool is_inner)
10149 {
10150         const struct rte_flow_item_flex *spec =
10151                 (const struct rte_flow_item_flex *)item->spec;
10152         int index = mlx5_flex_acquire_index(dev, spec->handle, false);
10153
10154         MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
10155         if (index < 0)
10156                 return;
10157         if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
10158                 /* Don't count both inner and outer flex items in one rule. */
10159                 if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
10160                         MLX5_ASSERT(false);
10161                 dev_flow->handle->flex_item |= RTE_BIT32(index);
10162         }
10163         mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
10164 }
10165
10166 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
10167
10168 #define HEADER_IS_ZERO(match_criteria, headers)                              \
10169         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
10170                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
10171
10172 /**
10173  * Calculate flow matcher enable bitmap.
10174  *
10175  * @param match_criteria
10176  *   Pointer to flow matcher criteria.
10177  *
10178  * @return
10179  *   Bitmap of enabled fields.
10180  */
10181 static uint8_t
10182 flow_dv_matcher_enable(uint32_t *match_criteria)
10183 {
10184         uint8_t match_criteria_enable;
10185
10186         match_criteria_enable =
10187                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
10188                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
10189         match_criteria_enable |=
10190                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
10191                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
10192         match_criteria_enable |=
10193                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
10194                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
10195         match_criteria_enable |=
10196                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
10197                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
10198         match_criteria_enable |=
10199                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
10200                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
10201         match_criteria_enable |=
10202                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
10203                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
10204         match_criteria_enable |=
10205                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10206                 MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10207         return match_criteria_enable;
10208 }
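
/*
 * Worked example: a matcher whose mask touches only the outer headers
 * and misc_parameters_2 yields
 * (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 * (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT).
 */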
10209
10210 static void
10211 __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10212 {
10213         /*
10214          * Check flow matching criteria first, subtract misc5/4 length if flow
10215          * doesn't own misc5/4 parameters. In some old rdma-core releases,
10216          * misc5/4 are not supported, and matcher creation failure is expected
10217          * w/o subtraction. If misc5 is provided, misc4 must be counted in since
10218          * misc5 is right after misc4.
10219          */
10220         if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10221                 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10222                         MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10223                 if (!(match_criteria & (1 <<
10224                         MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10225                         *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10226                 }
10227         }
10228 }
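
/*
 * E.g. a matcher that enables neither misc4 nor misc5 reports a buffer
 * size of MLX5_ST_SZ_BYTES(fte_match_param) minus the misc5 and misc4
 * struct sizes, so matcher creation still succeeds on old rdma-core
 * releases that predate those fields.
 */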
10229
10230 static struct mlx5_list_entry *
10231 flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
10232                          struct mlx5_list_entry *entry, void *cb_ctx)
10233 {
10234         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10235         struct mlx5_flow_dv_matcher *ref = ctx->data;
10236         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10237                                                             typeof(*tbl), tbl);
10238         struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10239                                                             sizeof(*resource),
10240                                                             0, SOCKET_ID_ANY);
10241
10242         if (!resource) {
10243                 rte_flow_error_set(ctx->error, ENOMEM,
10244                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10245                                    "cannot create matcher");
10246                 return NULL;
10247         }
10248         memcpy(resource, entry, sizeof(*resource));
10249         resource->tbl = &tbl->tbl;
10250         return &resource->entry;
10251 }
10252
10253 static void
10254 flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
10255                              struct mlx5_list_entry *entry)
10256 {
10257         mlx5_free(entry);
10258 }
10259
10260 struct mlx5_list_entry *
10261 flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
10262 {
10263         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10264         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10265         struct rte_eth_dev *dev = ctx->dev;
10266         struct mlx5_flow_tbl_data_entry *tbl_data;
10267         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
10268         struct rte_flow_error *error = ctx->error;
10269         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10270         struct mlx5_flow_tbl_resource *tbl;
10271         void *domain;
10272         uint32_t idx = 0;
10273         int ret;
10274
10275         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10276         if (!tbl_data) {
10277                 rte_flow_error_set(error, ENOMEM,
10278                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10279                                    NULL,
10280                                    "cannot allocate flow table data entry");
10281                 return NULL;
10282         }
10283         tbl_data->idx = idx;
10284         tbl_data->tunnel = tt_prm->tunnel;
10285         tbl_data->group_id = tt_prm->group_id;
10286         tbl_data->external = !!tt_prm->external;
10287         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
10288         tbl_data->is_egress = !!key.is_egress;
10289         tbl_data->is_transfer = !!key.is_fdb;
10290         tbl_data->dummy = !!key.dummy;
10291         tbl_data->level = key.level;
10292         tbl_data->id = key.id;
10293         tbl = &tbl_data->tbl;
10294         if (key.dummy)
10295                 return &tbl_data->entry;
10296         if (key.is_fdb)
10297                 domain = sh->fdb_domain;
10298         else if (key.is_egress)
10299                 domain = sh->tx_domain;
10300         else
10301                 domain = sh->rx_domain;
10302         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
10303         if (ret) {
10304                 rte_flow_error_set(error, ENOMEM,
10305                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10306                                    NULL, "cannot create flow table object");
10307                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10308                 return NULL;
10309         }
10310         if (key.level != 0) {
10311                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
10312                                         (tbl->obj, &tbl_data->jump.action);
10313                 if (ret) {
10314                         rte_flow_error_set(error, ENOMEM,
10315                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10316                                            NULL,
10317                                            "cannot create flow jump action");
10318                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10319                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10320                         return NULL;
10321                 }
10322         }
10323         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
10324               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
10325               key.level, key.id);
10326         tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
10327                                               flow_dv_matcher_create_cb,
10328                                               flow_dv_matcher_match_cb,
10329                                               flow_dv_matcher_remove_cb,
10330                                               flow_dv_matcher_clone_cb,
10331                                               flow_dv_matcher_clone_free_cb);
10332         if (!tbl_data->matchers) {
10333                 rte_flow_error_set(error, ENOMEM,
10334                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10335                                    NULL,
10336                                    "cannot create tbl matcher list");
10337                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10338                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10339                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10340                 return NULL;
10341         }
10342         return &tbl_data->entry;
10343 }
10344
10345 int
10346 flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10347                      void *cb_ctx)
10348 {
10349         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10350         struct mlx5_flow_tbl_data_entry *tbl_data =
10351                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10352         union mlx5_flow_tbl_key key = { .v64 =  *(uint64_t *)(ctx->data) };
10353
10354         return tbl_data->level != key.level ||
10355                tbl_data->id != key.id ||
10356                tbl_data->dummy != key.dummy ||
10357                tbl_data->is_transfer != !!key.is_fdb ||
10358                tbl_data->is_egress != !!key.is_egress;
10359 }
10360
10361 struct mlx5_list_entry *
10362 flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10363                       void *cb_ctx)
10364 {
10365         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10366         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10367         struct mlx5_flow_tbl_data_entry *tbl_data;
10368         struct rte_flow_error *error = ctx->error;
10369         uint32_t idx = 0;
10370
10371         tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10372         if (!tbl_data) {
10373                 rte_flow_error_set(error, ENOMEM,
10374                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10375                                    NULL,
10376                                    "cannot allocate flow table data entry");
10377                 return NULL;
10378         }
10379         memcpy(tbl_data, oentry, sizeof(*tbl_data));
10380         tbl_data->idx = idx;
10381         return &tbl_data->entry;
10382 }
10383
10384 void
10385 flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10386 {
10387         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10388         struct mlx5_flow_tbl_data_entry *tbl_data =
10389                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10390
10391         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10392 }
10393
10394 /**
10395  * Get a flow table.
10396  *
10397  * @param[in, out] dev
10398  *   Pointer to rte_eth_dev structure.
10399  * @param[in] table_level
10400  *   Table level to use.
10401  * @param[in] egress
10402  *   Direction of the table.
10403  * @param[in] transfer
10404  *   E-Switch or NIC flow.
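 * @param[in] external
 *   Indicates whether the table serves user (external) flows.
 * @param[in] tunnel
 *   Tunnel offload context, NULL if none.
 * @param[in] group_id
 *   Tunnel group ID.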
10405  * @param[in] dummy
10406  *   Dummy entry for dv API.
10407  * @param[in] table_id
10408  *   Table id to use.
10409  * @param[out] error
10410  *   Pointer to error structure.
10411  *
10412  * @return
10413  *   Returns the table resource based on the key, NULL in case of failure.
10414  */
10415 struct mlx5_flow_tbl_resource *
10416 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10417                          uint32_t table_level, uint8_t egress,
10418                          uint8_t transfer,
10419                          bool external,
10420                          const struct mlx5_flow_tunnel *tunnel,
10421                          uint32_t group_id, uint8_t dummy,
10422                          uint32_t table_id,
10423                          struct rte_flow_error *error)
10424 {
10425         struct mlx5_priv *priv = dev->data->dev_private;
10426         union mlx5_flow_tbl_key table_key = {
10427                 {
10428                         .level = table_level,
10429                         .id = table_id,
10430                         .reserved = 0,
10431                         .dummy = !!dummy,
10432                         .is_fdb = !!transfer,
10433                         .is_egress = !!egress,
10434                 }
10435         };
10436         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10437                 .tunnel = tunnel,
10438                 .group_id = group_id,
10439                 .external = external,
10440         };
10441         struct mlx5_flow_cb_ctx ctx = {
10442                 .dev = dev,
10443                 .error = error,
10444                 .data = &table_key.v64,
10445                 .data2 = &tt_prm,
10446         };
10447         struct mlx5_list_entry *entry;
10448         struct mlx5_flow_tbl_data_entry *tbl_data;
10449
10450         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10451         if (!entry) {
10452                 rte_flow_error_set(error, ENOMEM,
10453                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10454                                    "cannot get table");
10455                 return NULL;
10456         }
10457         DRV_LOG(DEBUG, "table_level %u table_id %u "
10458                 "tunnel %u group %u registered.",
10459                 table_level, table_id,
10460                 tunnel ? tunnel->tunnel_id : 0, group_id);
10461         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10462         return &tbl_data->tbl;
10463 }
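
/*
 * Usage sketch (illustrative): given an rte_flow_error named error,
 * fetching a NIC Rx table for level 1 might look like
 *
 *     struct mlx5_flow_tbl_resource *tbl =
 *             flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL,
 *                                      0, 0, 0, &error);
 *
 * The returned entry is reference counted through the hash list and
 * must be dropped with flow_dv_tbl_resource_release().
 */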
10464
10465 void
10466 flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10467 {
10468         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10469         struct mlx5_flow_tbl_data_entry *tbl_data =
10470                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10471
10472         MLX5_ASSERT(entry && sh);
10473         if (tbl_data->jump.action)
10474                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10475         if (tbl_data->tbl.obj)
10476                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10477         if (tbl_data->tunnel_offload && tbl_data->external) {
10478                 struct mlx5_list_entry *he;
10479                 struct mlx5_hlist *tunnel_grp_hash;
10480                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10481                 union tunnel_tbl_key tunnel_key = {
10482                         .tunnel_id = tbl_data->tunnel ?
10483                                         tbl_data->tunnel->tunnel_id : 0,
10484                         .group = tbl_data->group_id
10485                 };
10486                 uint32_t table_level = tbl_data->level;
10487                 struct mlx5_flow_cb_ctx ctx = {
10488                         .data = (void *)&tunnel_key.val,
10489                 };
10490
10491                 tunnel_grp_hash = tbl_data->tunnel ?
10492                                         tbl_data->tunnel->groups :
10493                                         thub->groups;
10494                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
10495                 if (he)
10496                         mlx5_hlist_unregister(tunnel_grp_hash, he);
10497                 DRV_LOG(DEBUG,
10498                         "table_level %u id %u tunnel %u group %u released.",
10499                         table_level,
10500                         tbl_data->id,
10501                         tbl_data->tunnel ?
10502                         tbl_data->tunnel->tunnel_id : 0,
10503                         tbl_data->group_id);
10504         }
10505         mlx5_list_destroy(tbl_data->matchers);
10506         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10507 }
10508
10509 /**
10510  * Release a flow table.
10511  *
10512  * @param[in] sh
10513  *   Pointer to device shared structure.
10514  * @param[in] tbl
10515  *   Table resource to be released.
10516  *
10517  * @return
10518  *   Returns 0 if the table was released, 1 otherwise.
10519  */
10520 static int
10521 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10522                              struct mlx5_flow_tbl_resource *tbl)
10523 {
10524         struct mlx5_flow_tbl_data_entry *tbl_data =
10525                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10526
10527         if (!tbl)
10528                 return 0;
10529         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10530 }
10531
10532 int
10533 flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
10534                          struct mlx5_list_entry *entry, void *cb_ctx)
10535 {
10536         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10537         struct mlx5_flow_dv_matcher *ref = ctx->data;
10538         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10539                                                         entry);
10540
10541         return cur->crc != ref->crc ||
10542                cur->priority != ref->priority ||
10543                memcmp((const void *)cur->mask.buf,
10544                       (const void *)ref->mask.buf, ref->mask.size);
10545 }
10546
10547 struct mlx5_list_entry *
10548 flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
10549 {
10550         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10551         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10552         struct mlx5_flow_dv_matcher *ref = ctx->data;
10553         struct mlx5_flow_dv_matcher *resource;
10554         struct mlx5dv_flow_matcher_attr dv_attr = {
10555                 .type = IBV_FLOW_ATTR_NORMAL,
10556                 .match_mask = (void *)&ref->mask,
10557         };
10558         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10559                                                             typeof(*tbl), tbl);
10560         int ret;
10561
10562         resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
10563                                SOCKET_ID_ANY);
10564         if (!resource) {
10565                 rte_flow_error_set(ctx->error, ENOMEM,
10566                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10567                                    "cannot create matcher");
10568                 return NULL;
10569         }
10570         *resource = *ref;
10571         dv_attr.match_criteria_enable =
10572                 flow_dv_matcher_enable(resource->mask.buf);
10573         __flow_dv_adjust_buf_size(&ref->mask.size,
10574                                   dv_attr.match_criteria_enable);
10575         dv_attr.priority = ref->priority;
10576         if (tbl->is_egress)
10577                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10578         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
10579                                                tbl->tbl.obj,
10580                                                &resource->matcher_object);
10581         if (ret) {
10582                 mlx5_free(resource);
10583                 rte_flow_error_set(ctx->error, ENOMEM,
10584                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10585                                    "cannot create matcher");
10586                 return NULL;
10587         }
10588         return &resource->entry;
10589 }
10590
/**
 * Register the flow matcher.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] ref
 *   Pointer to the flow matcher reference.
 * @param[in, out] key
 *   Pointer to the flow table key.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context, NULL if not used.
 * @param[in] group_id
 *   Flow table group ID.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
10608 static int
10609 flow_dv_matcher_register(struct rte_eth_dev *dev,
10610                          struct mlx5_flow_dv_matcher *ref,
10611                          union mlx5_flow_tbl_key *key,
10612                          struct mlx5_flow *dev_flow,
10613                          const struct mlx5_flow_tunnel *tunnel,
10614                          uint32_t group_id,
10615                          struct rte_flow_error *error)
10616 {
10617         struct mlx5_list_entry *entry;
10618         struct mlx5_flow_dv_matcher *resource;
10619         struct mlx5_flow_tbl_resource *tbl;
10620         struct mlx5_flow_tbl_data_entry *tbl_data;
10621         struct mlx5_flow_cb_ctx ctx = {
10622                 .error = error,
10623                 .data = ref,
10624         };
        /*
         * The tunnel offload API requires this registration when a tunnel
         * match rule is inserted before the tunnel set rule.
         */
10629         tbl = flow_dv_tbl_resource_get(dev, key->level,
10630                                        key->is_egress, key->is_fdb,
10631                                        dev_flow->external, tunnel,
10632                                        group_id, 0, key->id, error);
10633         if (!tbl)
10634                 return -rte_errno;      /* No need to refill the error info */
10635         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10636         ref->tbl = tbl;
10637         entry = mlx5_list_register(tbl_data->matchers, &ctx);
10638         if (!entry) {
10639                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10640                 return rte_flow_error_set(error, ENOMEM,
10641                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10642                                           "cannot allocate ref memory");
10643         }
10644         resource = container_of(entry, typeof(*resource), entry);
10645         dev_flow->handle->dvh.matcher = resource;
10646         return 0;
10647 }
10648
10649 struct mlx5_list_entry *
10650 flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
10651 {
10652         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10653         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10654         struct mlx5_flow_dv_tag_resource *entry;
10655         uint32_t idx = 0;
10656         int ret;
10657
10658         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10659         if (!entry) {
10660                 rte_flow_error_set(ctx->error, ENOMEM,
10661                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10662                                    "cannot allocate resource memory");
10663                 return NULL;
10664         }
10665         entry->idx = idx;
10666         entry->tag_id = *(uint32_t *)(ctx->data);
10667         ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
10668                                                   &entry->action);
10669         if (ret) {
10670                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10671                 rte_flow_error_set(ctx->error, ENOMEM,
10672                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10673                                    NULL, "cannot create action");
10674                 return NULL;
10675         }
10676         return &entry->entry;
10677 }
10678
10679 int
10680 flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10681                      void *cb_ctx)
10682 {
10683         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10684         struct mlx5_flow_dv_tag_resource *tag =
10685                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10686
10687         return *(uint32_t *)(ctx->data) != tag->tag_id;
10688 }
10689
10690 struct mlx5_list_entry *
10691 flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10692                      void *cb_ctx)
10693 {
10694         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10695         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10696         struct mlx5_flow_dv_tag_resource *entry;
10697         uint32_t idx = 0;
10698
10699         entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10700         if (!entry) {
10701                 rte_flow_error_set(ctx->error, ENOMEM,
10702                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10703                                    "cannot allocate tag resource memory");
10704                 return NULL;
10705         }
10706         memcpy(entry, oentry, sizeof(*entry));
10707         entry->idx = idx;
10708         return &entry->entry;
10709 }
10710
10711 void
10712 flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10713 {
10714         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10715         struct mlx5_flow_dv_tag_resource *tag =
10716                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10717
10718         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10719 }
10720
/**
 * Find an existing tag resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tag_be24
 *   Tag value in big-endian, right-shifted by 8 bits.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
10736 static int
10737 flow_dv_tag_resource_register
10738                         (struct rte_eth_dev *dev,
10739                          uint32_t tag_be24,
10740                          struct mlx5_flow *dev_flow,
10741                          struct rte_flow_error *error)
10742 {
10743         struct mlx5_priv *priv = dev->data->dev_private;
10744         struct mlx5_flow_dv_tag_resource *resource;
10745         struct mlx5_list_entry *entry;
10746         struct mlx5_flow_cb_ctx ctx = {
10747                                         .error = error,
10748                                         .data = &tag_be24,
10749                                         };
10750         struct mlx5_hlist *tag_table;
10751
10752         tag_table = flow_dv_hlist_prepare(priv->sh, &priv->sh->tag_table,
10753                                       "tags",
10754                                       MLX5_TAGS_HLIST_ARRAY_SIZE,
10755                                       false, false, priv->sh,
10756                                       flow_dv_tag_create_cb,
10757                                       flow_dv_tag_match_cb,
10758                                       flow_dv_tag_remove_cb,
10759                                       flow_dv_tag_clone_cb,
10760                                       flow_dv_tag_clone_free_cb);
10761         if (unlikely(!tag_table))
10762                 return -rte_errno;
10763         entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
10764         if (entry) {
10765                 resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
10766                                         entry);
10767                 dev_flow->handle->dvh.rix_tag = resource->idx;
10768                 dev_flow->dv.tag_resource = resource;
10769                 return 0;
10770         }
10771         return -rte_errno;
10772 }
10773
10774 void
10775 flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10776 {
10777         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10778         struct mlx5_flow_dv_tag_resource *tag =
10779                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10780
10781         MLX5_ASSERT(tag && sh && tag->action);
10782         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10783         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10784         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10785 }
10786
10787 /**
10788  * Release the tag.
10789  *
10790  * @param dev
10791  *   Pointer to Ethernet device.
10792  * @param tag_idx
10793  *   Tag index.
10794  *
10795  * @return
 *   1 if a reference on it still exists, 0 when freed.
10797  */
10798 static int
10799 flow_dv_tag_release(struct rte_eth_dev *dev,
10800                     uint32_t tag_idx)
10801 {
10802         struct mlx5_priv *priv = dev->data->dev_private;
10803         struct mlx5_flow_dv_tag_resource *tag;
10804
10805         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10806         if (!tag)
10807                 return 0;
10808         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10809                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10810         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10811 }
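
/*
 * Usage sketch (illustrative only, not part of the driver): tag resources
 * are never created by applications directly; they are registered on demand
 * when a MARK action is translated, e.g. a rule carrying:
 *
 *  struct rte_flow_action_mark mark = { .id = 0xbeef };
 *  struct rte_flow_action actions[] = {
 *          { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *          { .type = RTE_FLOW_ACTION_TYPE_END },
 *  };
 *
 * ends up going through the tag register/release helpers above; 0xbeef is a
 * hypothetical mark value.
 */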
10812
10813 /**
10814  * Translate action PORT_ID / REPRESENTED_PORT to vport.
10815  *
10816  * @param[in] dev
10817  *   Pointer to rte_eth_dev structure.
10818  * @param[in] action
10819  *   Pointer to action PORT_ID / REPRESENTED_PORT.
10820  * @param[out] dst_port_id
10821  *   The target port ID.
10822  * @param[out] error
10823  *   Pointer to the error structure.
10824  *
10825  * @return
10826  *   0 on success, a negative errno value otherwise and rte_errno is set.
10827  */
10828 static int
10829 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10830                                  const struct rte_flow_action *action,
10831                                  uint32_t *dst_port_id,
10832                                  struct rte_flow_error *error)
10833 {
10834         uint32_t port;
10835         struct mlx5_priv *priv;
10836
10837         switch (action->type) {
10838         case RTE_FLOW_ACTION_TYPE_PORT_ID: {
10839                 const struct rte_flow_action_port_id *conf;
10840
10841                 conf = (const struct rte_flow_action_port_id *)action->conf;
10842                 port = conf->original ? dev->data->port_id : conf->id;
10843                 break;
10844         }
10845         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
10846                 const struct rte_flow_action_ethdev *ethdev;
10847
10848                 ethdev = (const struct rte_flow_action_ethdev *)action->conf;
10849                 port = ethdev->port_id;
10850                 break;
10851         }
10852         default:
10853                 MLX5_ASSERT(false);
10854                 return rte_flow_error_set(error, EINVAL,
10855                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
10856                                           "unknown E-Switch action");
10857         }
10858
10859         priv = mlx5_port_to_eswitch_info(port, false);
10860         if (!priv)
10861                 return rte_flow_error_set(error, -rte_errno,
10862                                           RTE_FLOW_ERROR_TYPE_ACTION,
10863                                           NULL,
10864                                           "No eswitch info was found for port");
10865 #ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
10866         /*
10867          * This parameter is transferred to
10868          * mlx5dv_dr_action_create_dest_ib_port().
10869          */
10870         *dst_port_id = priv->dev_port;
10871 #else
10872         /*
         * Legacy mode, no LAG configuration is supported.
10874          * This parameter is transferred to
10875          * mlx5dv_dr_action_create_dest_vport().
10876          */
10877         *dst_port_id = priv->vport_id;
10878 #endif
10879         return 0;
10880 }
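
/*
 * Usage sketch (illustrative only, not part of the driver): a transfer rule
 * forwarding matching packets to the E-Switch port behind ethdev port 1
 * (a hypothetical port id):
 *
 *  struct rte_flow_action_port_id conf = { .id = 1 };
 *  struct rte_flow_action actions[] = {
 *          { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &conf },
 *          { .type = RTE_FLOW_ACTION_TYPE_END },
 *  };
 *
 * With conf.original set, the configured id is ignored and the traffic is
 * sent back to the port the rule is created on, matching the conf->original
 * handling above.
 */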
10881
10882 /**
10883  * Create a counter with aging configuration.
10884  *
10885  * @param[in] dev
10886  *   Pointer to rte_eth_dev structure.
10887  * @param[in] dev_flow
10888  *   Pointer to the mlx5_flow.
10889  * @param[out] count
10890  *   Pointer to the counter action configuration.
10891  * @param[in] age
10892  *   Pointer to the aging action configuration.
10893  *
10894  * @return
10895  *   Index to flow counter on success, 0 otherwise.
10896  */
10897 static uint32_t
10898 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10899                                 struct mlx5_flow *dev_flow,
10900                                 const struct rte_flow_action_count *count
10901                                         __rte_unused,
10902                                 const struct rte_flow_action_age *age)
10903 {
10904         uint32_t counter;
10905         struct mlx5_age_param *age_param;
10906
10907         counter = flow_dv_counter_alloc(dev, !!age);
10908         if (!counter || age == NULL)
10909                 return counter;
10910         age_param = flow_dv_counter_idx_get_age(dev, counter);
10911         age_param->context = age->context ? age->context :
10912                 (void *)(uintptr_t)(dev_flow->flow_idx);
10913         age_param->timeout = age->timeout;
10914         age_param->port_id = dev->data->port_id;
10915         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10916         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10917         return counter;
10918 }
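
/*
 * Usage sketch (illustrative only, not part of the driver): pairing COUNT
 * with AGE makes the allocated counter age-capable, as handled above. The
 * timeout is in seconds; 10 is an arbitrary example value:
 *
 *  struct rte_flow_action_count count = { 0 };
 *  struct rte_flow_action_age age = { .timeout = 10, .context = NULL };
 *  struct rte_flow_action actions[] = {
 *          { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
 *          { .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age },
 *          { .type = RTE_FLOW_ACTION_TYPE_END },
 *  };
 *
 * A NULL age.context makes the driver fall back to the flow index, as set
 * in the function above.
 */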
10919
10920 /**
 * Add Tx queue matcher.
 *
 * @param[in] dev
 *   Pointer to the dev struct.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
10934 static void
10935 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10936                                 void *matcher, void *key,
10937                                 const struct rte_flow_item *item)
10938 {
10939         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10940         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10941         void *misc_m =
10942                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10943         void *misc_v =
10944                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10945         struct mlx5_txq_ctrl *txq;
10946         uint32_t queue, mask;
10947
10948         queue_m = (const void *)item->mask;
10949         queue_v = (const void *)item->spec;
10950         if (!queue_v)
10951                 return;
10952         txq = mlx5_txq_get(dev, queue_v->queue);
10953         if (!txq)
10954                 return;
10955         if (txq->type == MLX5_TXQ_TYPE_HAIRPIN)
10956                 queue = txq->obj->sq->id;
10957         else
10958                 queue = txq->obj->sq_obj.sq->id;
10959         mask = queue_m == NULL ? UINT32_MAX : queue_m->queue;
10960         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, mask);
10961         MLX5_SET(fte_match_set_misc, misc_v, source_sqn, queue & mask);
10962         mlx5_txq_release(dev, queue_v->queue);
10963 }
10964
10965 /**
 * Set the hash fields according to the @p dev_flow information.
10967  *
10968  * @param[in] dev_flow
10969  *   Pointer to the mlx5_flow.
10970  * @param[in] rss_desc
10971  *   Pointer to the mlx5_flow_rss_desc.
10972  */
10973 static void
10974 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10975                        struct mlx5_flow_rss_desc *rss_desc)
10976 {
10977         uint64_t items = dev_flow->handle->layers;
10978         int rss_inner = 0;
10979         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10980
10981         dev_flow->hash_fields = 0;
10982 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10983         if (rss_desc->level >= 2)
10984                 rss_inner = 1;
10985 #endif
10986         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10987             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10988                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10989                         if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
10990                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10991                         else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
10992                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10993                         else
10994                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10995                 }
10996         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10997                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10998                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10999                         if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
11000                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
11001                         else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
11002                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
11003                         else
11004                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
11005                 }
11006         }
11007         if (dev_flow->hash_fields == 0)
11008                 /*
11009                  * There is no match between the RSS types and the
11010                  * L3 protocol (IPv4/IPv6) defined in the flow rule.
11011                  */
11012                 return;
11013         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
11014             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
11015                 if (rss_types & RTE_ETH_RSS_UDP) {
11016                         if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
11017                                 dev_flow->hash_fields |=
11018                                                 IBV_RX_HASH_SRC_PORT_UDP;
11019                         else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
11020                                 dev_flow->hash_fields |=
11021                                                 IBV_RX_HASH_DST_PORT_UDP;
11022                         else
11023                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
11024                 }
11025         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
11026                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
11027                 if (rss_types & RTE_ETH_RSS_TCP) {
11028                         if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
11029                                 dev_flow->hash_fields |=
11030                                                 IBV_RX_HASH_SRC_PORT_TCP;
11031                         else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
11032                                 dev_flow->hash_fields |=
11033                                                 IBV_RX_HASH_DST_PORT_TCP;
11034                         else
11035                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
11036                 }
11037         }
11038         if (rss_inner)
11039                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
11040 }
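
/*
 * Usage sketch (illustrative only, not part of the driver): an RSS action
 * that exercises the selection above, hashing on the inner IPv4 source
 * address only (level >= 2 selects the inner-most headers when tunnel
 * support is available; the queue numbers are hypothetical):
 *
 *  uint16_t queues[] = { 0, 1, 2, 3 };
 *  struct rte_flow_action_rss rss = {
 *          .level = 2,
 *          .types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_L3_SRC_ONLY,
 *          .queue_num = 4,
 *          .queue = queues,
 *  };
 */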
11041
11042 /**
11043  * Prepare an Rx Hash queue.
11044  *
11045  * @param dev
11046  *   Pointer to Ethernet device.
11047  * @param[in] dev_flow
11048  *   Pointer to the mlx5_flow.
11049  * @param[in] rss_desc
11050  *   Pointer to the mlx5_flow_rss_desc.
11051  * @param[out] hrxq_idx
11052  *   Hash Rx queue index.
11053  *
11054  * @return
 *   Valid Rx hash queue pointer on success, NULL otherwise and rte_errno is set.
11056  */
11057 static struct mlx5_hrxq *
11058 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
11059                      struct mlx5_flow *dev_flow,
11060                      struct mlx5_flow_rss_desc *rss_desc,
11061                      uint32_t *hrxq_idx)
11062 {
11063         struct mlx5_priv *priv = dev->data->dev_private;
11064         struct mlx5_flow_handle *dh = dev_flow->handle;
11065         struct mlx5_hrxq *hrxq;
11066
11067         MLX5_ASSERT(rss_desc->queue_num);
11068         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
11069         rss_desc->hash_fields = dev_flow->hash_fields;
11070         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
11071         rss_desc->shared_rss = 0;
11072         if (rss_desc->hash_fields == 0)
11073                 rss_desc->queue_num = 1;
11074         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
11075         if (!*hrxq_idx)
11076                 return NULL;
11077         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
11078                               *hrxq_idx);
11079         return hrxq;
11080 }
11081
11082 /**
11083  * Release sample sub action resource.
11084  *
11085  * @param[in, out] dev
11086  *   Pointer to rte_eth_dev structure.
11087  * @param[in] act_res
11088  *   Pointer to sample sub action resource.
11089  */
11090 static void
11091 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
11092                                    struct mlx5_flow_sub_actions_idx *act_res)
11093 {
11094         if (act_res->rix_hrxq) {
11095                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
11096                 act_res->rix_hrxq = 0;
11097         }
11098         if (act_res->rix_encap_decap) {
11099                 flow_dv_encap_decap_resource_release(dev,
11100                                                      act_res->rix_encap_decap);
11101                 act_res->rix_encap_decap = 0;
11102         }
11103         if (act_res->rix_port_id_action) {
11104                 flow_dv_port_id_action_resource_release(dev,
11105                                                 act_res->rix_port_id_action);
11106                 act_res->rix_port_id_action = 0;
11107         }
11108         if (act_res->rix_tag) {
11109                 flow_dv_tag_release(dev, act_res->rix_tag);
11110                 act_res->rix_tag = 0;
11111         }
11112         if (act_res->rix_jump) {
11113                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
11114                 act_res->rix_jump = 0;
11115         }
11116 }
11117
11118 int
11119 flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
11120                         struct mlx5_list_entry *entry, void *cb_ctx)
11121 {
11122         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11123         struct rte_eth_dev *dev = ctx->dev;
11124         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11125         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
11126                                                               typeof(*resource),
11127                                                               entry);
11128
11129         if (ctx_resource->ratio == resource->ratio &&
11130             ctx_resource->ft_type == resource->ft_type &&
11131             ctx_resource->ft_id == resource->ft_id &&
11132             ctx_resource->set_action == resource->set_action &&
11133             !memcmp((void *)&ctx_resource->sample_act,
11134                     (void *)&resource->sample_act,
11135                     sizeof(struct mlx5_flow_sub_actions_list))) {
                /*
                 * An existing sample action matched: release the references
                 * taken on the prepared sub-actions.
                 */
11140                 flow_dv_sample_sub_actions_release(dev,
11141                                                    &ctx_resource->sample_idx);
11142                 return 0;
11143         }
11144         return 1;
11145 }
11146
11147 struct mlx5_list_entry *
11148 flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11149 {
11150         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11151         struct rte_eth_dev *dev = ctx->dev;
11152         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11153         void **sample_dv_actions = ctx_resource->sub_actions;
11154         struct mlx5_flow_dv_sample_resource *resource;
11155         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
11156         struct mlx5_priv *priv = dev->data->dev_private;
11157         struct mlx5_dev_ctx_shared *sh = priv->sh;
11158         struct mlx5_flow_tbl_resource *tbl;
11159         uint32_t idx = 0;
11160         const uint32_t next_ft_step = 1;
11161         uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
11162         uint8_t is_egress = 0;
11163         uint8_t is_transfer = 0;
11164         struct rte_flow_error *error = ctx->error;
11165
11166         /* Register new sample resource. */
11167         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11168         if (!resource) {
11169                 rte_flow_error_set(error, ENOMEM,
11170                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11171                                           NULL,
11172                                           "cannot allocate resource memory");
11173                 return NULL;
11174         }
11175         *resource = *ctx_resource;
11176         /* Create normal path table level */
11177         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11178                 is_transfer = 1;
11179         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
11180                 is_egress = 1;
11181         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
11182                                         is_egress, is_transfer,
11183                                         true, NULL, 0, 0, 0, error);
11184         if (!tbl) {
11185                 rte_flow_error_set(error, ENOMEM,
11186                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11187                                           NULL,
11188                                           "fail to create normal path table "
11189                                           "for sample");
11190                 goto error;
11191         }
11192         resource->normal_path_tbl = tbl;
11193         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
11194                 if (!sh->default_miss_action) {
11195                         rte_flow_error_set(error, ENOMEM,
11196                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11197                                                 NULL,
11198                                                 "default miss action was not "
11199                                                 "created");
11200                         goto error;
11201                 }
11202                 sample_dv_actions[ctx_resource->sample_act.actions_num++] =
11203                                                 sh->default_miss_action;
11204         }
11205         /* Create a DR sample action */
11206         sampler_attr.sample_ratio = resource->ratio;
11207         sampler_attr.default_next_table = tbl->obj;
11208         sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
11209         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
11210                                                         &sample_dv_actions[0];
11211         sampler_attr.action = resource->set_action;
11212         if (mlx5_os_flow_dr_create_flow_action_sampler
11213                         (&sampler_attr, &resource->verbs_action)) {
11214                 rte_flow_error_set(error, ENOMEM,
11215                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11216                                         NULL, "cannot create sample action");
11217                 goto error;
11218         }
11219         resource->idx = idx;
11220         resource->dev = dev;
11221         return &resource->entry;
11222 error:
11223         if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
11224                 flow_dv_sample_sub_actions_release(dev,
11225                                                    &resource->sample_idx);
11226         if (resource->normal_path_tbl)
11227                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11228                                 resource->normal_path_tbl);
11229         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
11230         return NULL;
}
11233
11234 struct mlx5_list_entry *
11235 flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
11236                          struct mlx5_list_entry *entry __rte_unused,
11237                          void *cb_ctx)
11238 {
11239         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11240         struct rte_eth_dev *dev = ctx->dev;
11241         struct mlx5_flow_dv_sample_resource *resource;
11242         struct mlx5_priv *priv = dev->data->dev_private;
11243         struct mlx5_dev_ctx_shared *sh = priv->sh;
11244         uint32_t idx = 0;
11245
11246         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11247         if (!resource) {
11248                 rte_flow_error_set(ctx->error, ENOMEM,
11249                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11250                                           NULL,
11251                                           "cannot allocate resource memory");
11252                 return NULL;
11253         }
11254         memcpy(resource, entry, sizeof(*resource));
11255         resource->idx = idx;
11256         resource->dev = dev;
11257         return &resource->entry;
11258 }
11259
11260 void
11261 flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
11262                              struct mlx5_list_entry *entry)
11263 {
11264         struct mlx5_flow_dv_sample_resource *resource =
11265                                   container_of(entry, typeof(*resource), entry);
11266         struct rte_eth_dev *dev = resource->dev;
11267         struct mlx5_priv *priv = dev->data->dev_private;
11268
11269         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
11270 }
11271
11272 /**
 * Find an existing sample resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] ref
 *   Pointer to the sample resource reference.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
11286  */
11287 static int
11288 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
11289                          struct mlx5_flow_dv_sample_resource *ref,
11290                          struct mlx5_flow *dev_flow,
11291                          struct rte_flow_error *error)
11292 {
11293         struct mlx5_flow_dv_sample_resource *resource;
11294         struct mlx5_list_entry *entry;
11295         struct mlx5_priv *priv = dev->data->dev_private;
11296         struct mlx5_flow_cb_ctx ctx = {
11297                 .dev = dev,
11298                 .error = error,
11299                 .data = ref,
11300         };
11301
11302         entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
11303         if (!entry)
11304                 return -rte_errno;
11305         resource = container_of(entry, typeof(*resource), entry);
11306         dev_flow->handle->dvh.rix_sample = resource->idx;
11307         dev_flow->dv.sample_res = resource;
11308         return 0;
11309 }
11310
11311 int
11312 flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
11313                             struct mlx5_list_entry *entry, void *cb_ctx)
11314 {
11315         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11316         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11317         struct rte_eth_dev *dev = ctx->dev;
11318         struct mlx5_flow_dv_dest_array_resource *resource =
11319                                   container_of(entry, typeof(*resource), entry);
11320         uint32_t idx = 0;
11321
11322         if (ctx_resource->num_of_dest == resource->num_of_dest &&
11323             ctx_resource->ft_type == resource->ft_type &&
11324             !memcmp((void *)resource->sample_act,
11325                     (void *)ctx_resource->sample_act,
11326                    (ctx_resource->num_of_dest *
11327                    sizeof(struct mlx5_flow_sub_actions_list)))) {
                /*
                 * An existing sample action matched: release the references
                 * taken on the prepared sub-actions.
                 */
11332                 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11333                         flow_dv_sample_sub_actions_release(dev,
11334                                         &ctx_resource->sample_idx[idx]);
11335                 return 0;
11336         }
11337         return 1;
11338 }
11339
11340 struct mlx5_list_entry *
11341 flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11342 {
11343         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11344         struct rte_eth_dev *dev = ctx->dev;
11345         struct mlx5_flow_dv_dest_array_resource *resource;
11346         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11347         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
11348         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
11349         struct mlx5_priv *priv = dev->data->dev_private;
11350         struct mlx5_dev_ctx_shared *sh = priv->sh;
11351         struct mlx5_flow_sub_actions_list *sample_act;
11352         struct mlx5dv_dr_domain *domain;
11353         uint32_t idx = 0, res_idx = 0;
11354         struct rte_flow_error *error = ctx->error;
11355         uint64_t action_flags;
11356         int ret;
11357
11358         /* Register new destination array resource. */
11359         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11360                                             &res_idx);
11361         if (!resource) {
11362                 rte_flow_error_set(error, ENOMEM,
11363                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11364                                           NULL,
11365                                           "cannot allocate resource memory");
11366                 return NULL;
11367         }
11368         *resource = *ctx_resource;
11369         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11370                 domain = sh->fdb_domain;
11371         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
11372                 domain = sh->rx_domain;
11373         else
11374                 domain = sh->tx_domain;
11375         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11376                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
11377                                  mlx5_malloc(MLX5_MEM_ZERO,
11378                                  sizeof(struct mlx5dv_dr_action_dest_attr),
11379                                  0, SOCKET_ID_ANY);
11380                 if (!dest_attr[idx]) {
11381                         rte_flow_error_set(error, ENOMEM,
11382                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11383                                            NULL,
11384                                            "cannot allocate resource memory");
11385                         goto error;
11386                 }
11387                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
11388                 sample_act = &ctx_resource->sample_act[idx];
11389                 action_flags = sample_act->action_flags;
11390                 switch (action_flags) {
11391                 case MLX5_FLOW_ACTION_QUEUE:
11392                         dest_attr[idx]->dest = sample_act->dr_queue_action;
11393                         break;
11394                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
11395                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
11396                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
11397                         dest_attr[idx]->dest_reformat->reformat =
11398                                         sample_act->dr_encap_action;
11399                         dest_attr[idx]->dest_reformat->dest =
11400                                         sample_act->dr_port_id_action;
11401                         break;
11402                 case MLX5_FLOW_ACTION_PORT_ID:
11403                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
11404                         break;
11405                 case MLX5_FLOW_ACTION_JUMP:
11406                         dest_attr[idx]->dest = sample_act->dr_jump_action;
11407                         break;
11408                 default:
11409                         rte_flow_error_set(error, EINVAL,
11410                                            RTE_FLOW_ERROR_TYPE_ACTION,
11411                                            NULL,
11412                                            "unsupported actions type");
11413                         goto error;
11414                 }
11415         }
11416         /* create a dest array action */
11417         ret = mlx5_os_flow_dr_create_flow_action_dest_array
11418                                                 (domain,
11419                                                  resource->num_of_dest,
11420                                                  dest_attr,
11421                                                  &resource->action);
11422         if (ret) {
11423                 rte_flow_error_set(error, ENOMEM,
11424                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11425                                    NULL,
11426                                    "cannot create destination array action");
11427                 goto error;
11428         }
11429         resource->idx = res_idx;
11430         resource->dev = dev;
11431         for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11432                 mlx5_free(dest_attr[idx]);
11433         return &resource->entry;
11434 error:
11435         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11436                 flow_dv_sample_sub_actions_release(dev,
11437                                                    &resource->sample_idx[idx]);
11438                 if (dest_attr[idx])
11439                         mlx5_free(dest_attr[idx]);
11440         }
11441         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
11442         return NULL;
11443 }
11444
11445 struct mlx5_list_entry *
11446 flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
11447                             struct mlx5_list_entry *entry __rte_unused,
11448                             void *cb_ctx)
11449 {
11450         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11451         struct rte_eth_dev *dev = ctx->dev;
11452         struct mlx5_flow_dv_dest_array_resource *resource;
11453         struct mlx5_priv *priv = dev->data->dev_private;
11454         struct mlx5_dev_ctx_shared *sh = priv->sh;
11455         uint32_t res_idx = 0;
11456         struct rte_flow_error *error = ctx->error;
11457
11458         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11459                                       &res_idx);
11460         if (!resource) {
11461                 rte_flow_error_set(error, ENOMEM,
11462                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11463                                           NULL,
11464                                           "cannot allocate dest-array memory");
11465                 return NULL;
11466         }
11467         memcpy(resource, entry, sizeof(*resource));
11468         resource->idx = res_idx;
11469         resource->dev = dev;
11470         return &resource->entry;
11471 }
11472
11473 void
11474 flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
11475                                  struct mlx5_list_entry *entry)
11476 {
11477         struct mlx5_flow_dv_dest_array_resource *resource =
11478                         container_of(entry, typeof(*resource), entry);
11479         struct rte_eth_dev *dev = resource->dev;
11480         struct mlx5_priv *priv = dev->data->dev_private;
11481
11482         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11483 }
11484
11485 /**
 * Find an existing destination array resource or create and register a new
 * one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] ref
 *   Pointer to the destination array resource reference.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
11499  */
11500 static int
11501 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11502                          struct mlx5_flow_dv_dest_array_resource *ref,
11503                          struct mlx5_flow *dev_flow,
11504                          struct rte_flow_error *error)
11505 {
11506         struct mlx5_flow_dv_dest_array_resource *resource;
11507         struct mlx5_priv *priv = dev->data->dev_private;
11508         struct mlx5_list_entry *entry;
11509         struct mlx5_flow_cb_ctx ctx = {
11510                 .dev = dev,
11511                 .error = error,
11512                 .data = ref,
11513         };
11514
11515         entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
11516         if (!entry)
11517                 return -rte_errno;
11518         resource = container_of(entry, typeof(*resource), entry);
11519         dev_flow->handle->dvh.rix_dest_array = resource->idx;
11520         dev_flow->dv.dest_array_res = resource;
11521         return 0;
11522 }
11523
11524 /**
11525  * Convert Sample action to DV specification.
11526  *
11527  * @param[in] dev
11528  *   Pointer to rte_eth_dev structure.
11529  * @param[in] action
11530  *   Pointer to sample action structure.
11531  * @param[in, out] dev_flow
11532  *   Pointer to the mlx5_flow.
11533  * @param[in] attr
11534  *   Pointer to the flow attributes.
11535  * @param[in, out] num_of_dest
11536  *   Pointer to the num of destination.
11537  * @param[in, out] sample_actions
11538  *   Pointer to sample actions list.
11539  * @param[in, out] res
11540  *   Pointer to sample resource.
11541  * @param[out] error
11542  *   Pointer to the error structure.
11543  *
11544  * @return
11545  *   0 on success, a negative errno value otherwise and rte_errno is set.
11546  */
11547 static int
11548 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
11549                                 const struct rte_flow_action_sample *action,
11550                                 struct mlx5_flow *dev_flow,
11551                                 const struct rte_flow_attr *attr,
11552                                 uint32_t *num_of_dest,
11553                                 void **sample_actions,
11554                                 struct mlx5_flow_dv_sample_resource *res,
11555                                 struct rte_flow_error *error)
11556 {
11557         struct mlx5_priv *priv = dev->data->dev_private;
11558         const struct rte_flow_action *sub_actions;
11559         struct mlx5_flow_sub_actions_list *sample_act;
11560         struct mlx5_flow_sub_actions_idx *sample_idx;
11561         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11562         struct rte_flow *flow = dev_flow->flow;
11563         struct mlx5_flow_rss_desc *rss_desc;
11564         uint64_t action_flags = 0;
11565
11566         MLX5_ASSERT(wks);
11567         rss_desc = &wks->rss_desc;
11568         sample_act = &res->sample_act;
11569         sample_idx = &res->sample_idx;
11570         res->ratio = action->ratio;
11571         sub_actions = action->actions;
11572         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
11573                 int type = sub_actions->type;
11574                 uint32_t pre_rix = 0;
11575                 void *pre_r;
11576                 switch (type) {
11577                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11578                 {
11579                         const struct rte_flow_action_queue *queue;
11580                         struct mlx5_hrxq *hrxq;
11581                         uint32_t hrxq_idx;
11582
11583                         queue = sub_actions->conf;
11584                         rss_desc->queue_num = 1;
11585                         rss_desc->queue[0] = queue->index;
11586                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11587                                                     rss_desc, &hrxq_idx);
11588                         if (!hrxq)
11589                                 return rte_flow_error_set
11590                                         (error, rte_errno,
11591                                          RTE_FLOW_ERROR_TYPE_ACTION,
11592                                          NULL,
11593                                          "cannot create fate queue");
11594                         sample_act->dr_queue_action = hrxq->action;
11595                         sample_idx->rix_hrxq = hrxq_idx;
11596                         sample_actions[sample_act->actions_num++] =
11597                                                 hrxq->action;
11598                         (*num_of_dest)++;
11599                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11600                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11601                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11602                         dev_flow->handle->fate_action =
11603                                         MLX5_FLOW_FATE_QUEUE;
11604                         break;
11605                 }
11606                 case RTE_FLOW_ACTION_TYPE_RSS:
11607                 {
11608                         struct mlx5_hrxq *hrxq;
11609                         uint32_t hrxq_idx;
11610                         const struct rte_flow_action_rss *rss;
11611                         const uint8_t *rss_key;
11612
11613                         rss = sub_actions->conf;
11614                         memcpy(rss_desc->queue, rss->queue,
11615                                rss->queue_num * sizeof(uint16_t));
11616                         rss_desc->queue_num = rss->queue_num;
11617                         /* NULL RSS key indicates default RSS key. */
11618                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11619                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11620                         /*
                         * rss->level and rss->types should be set in advance
11622                          * when expanding items for RSS.
11623                          */
11624                         flow_dv_hashfields_set(dev_flow, rss_desc);
11625                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11626                                                     rss_desc, &hrxq_idx);
11627                         if (!hrxq)
11628                                 return rte_flow_error_set
11629                                         (error, rte_errno,
11630                                          RTE_FLOW_ERROR_TYPE_ACTION,
11631                                          NULL,
11632                                          "cannot create fate queue");
11633                         sample_act->dr_queue_action = hrxq->action;
11634                         sample_idx->rix_hrxq = hrxq_idx;
11635                         sample_actions[sample_act->actions_num++] =
11636                                                 hrxq->action;
11637                         (*num_of_dest)++;
11638                         action_flags |= MLX5_FLOW_ACTION_RSS;
11639                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11640                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11641                         dev_flow->handle->fate_action =
11642                                         MLX5_FLOW_FATE_QUEUE;
11643                         break;
11644                 }
11645                 case RTE_FLOW_ACTION_TYPE_MARK:
11646                 {
11647                         uint32_t tag_be = mlx5_flow_mark_set
11648                                 (((const struct rte_flow_action_mark *)
11649                                 (sub_actions->conf))->id);
11650
11651                         wks->mark = 1;
11652                         pre_rix = dev_flow->handle->dvh.rix_tag;
11653                         /* Save the mark resource before sample */
11654                         pre_r = dev_flow->dv.tag_resource;
11655                         if (flow_dv_tag_resource_register(dev, tag_be,
11656                                                   dev_flow, error))
11657                                 return -rte_errno;
11658                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11659                         sample_act->dr_tag_action =
11660                                 dev_flow->dv.tag_resource->action;
11661                         sample_idx->rix_tag =
11662                                 dev_flow->handle->dvh.rix_tag;
11663                         sample_actions[sample_act->actions_num++] =
11664                                                 sample_act->dr_tag_action;
11665                         /* Recover the mark resource after sample */
11666                         dev_flow->dv.tag_resource = pre_r;
11667                         dev_flow->handle->dvh.rix_tag = pre_rix;
11668                         action_flags |= MLX5_FLOW_ACTION_MARK;
11669                         break;
11670                 }
11671                 case RTE_FLOW_ACTION_TYPE_COUNT:
11672                 {
11673                         if (!flow->counter) {
11674                                 flow->counter =
11675                                         flow_dv_translate_create_counter(dev,
11676                                                 dev_flow, sub_actions->conf,
11677                                                 0);
11678                                 if (!flow->counter)
11679                                         return rte_flow_error_set
11680                                                 (error, rte_errno,
11681                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11682                                                 NULL,
11683                                                 "cannot create counter"
11684                                                 " object.");
11685                         }
11686                         sample_act->dr_cnt_action =
11687                                   (flow_dv_counter_get_by_idx(dev,
11688                                   flow->counter, NULL))->action;
11689                         sample_actions[sample_act->actions_num++] =
11690                                                 sample_act->dr_cnt_action;
11691                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11692                         break;
11693                 }
11694                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11695                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
11696                 {
11697                         struct mlx5_flow_dv_port_id_action_resource
11698                                         port_id_resource;
11699                         uint32_t port_id = 0;
11700
11701                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11702                         /* Save the port id resource before sample */
11703                         pre_rix = dev_flow->handle->rix_port_id_action;
11704                         pre_r = dev_flow->dv.port_id_action;
11705                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11706                                                              &port_id, error))
11707                                 return -rte_errno;
11708                         port_id_resource.port_id = port_id;
11709                         if (flow_dv_port_id_action_resource_register
11710                             (dev, &port_id_resource, dev_flow, error))
11711                                 return -rte_errno;
11712                         sample_act->dr_port_id_action =
11713                                 dev_flow->dv.port_id_action->action;
11714                         sample_idx->rix_port_id_action =
11715                                 dev_flow->handle->rix_port_id_action;
11716                         sample_actions[sample_act->actions_num++] =
11717                                                 sample_act->dr_port_id_action;
11718                         /* Recover the port id resource after sample */
11719                         dev_flow->dv.port_id_action = pre_r;
11720                         dev_flow->handle->rix_port_id_action = pre_rix;
11721                         (*num_of_dest)++;
11722                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11723                         break;
11724                 }
11725                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11726                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11727                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11728                         /* Save the encap resource before sample */
11729                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11730                         pre_r = dev_flow->dv.encap_decap;
11731                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11732                                                            dev_flow,
11733                                                            attr->transfer,
11734                                                            error))
11735                                 return -rte_errno;
11736                         sample_act->dr_encap_action =
11737                                 dev_flow->dv.encap_decap->action;
11738                         sample_idx->rix_encap_decap =
11739                                 dev_flow->handle->dvh.rix_encap_decap;
11740                         sample_actions[sample_act->actions_num++] =
11741                                                 sample_act->dr_encap_action;
11742                         /* Recover the encap resource after sample */
11743                         dev_flow->dv.encap_decap = pre_r;
11744                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11745                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11746                         break;
11747                 default:
11748                         return rte_flow_error_set(error, EINVAL,
11749                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11750                                 NULL,
11751                                 "Not support for sampler action");
11752                 }
11753         }
11754         sample_act->action_flags = action_flags;
11755         res->ft_id = dev_flow->dv.group;
11756         if (attr->transfer) {
11757                 union {
11758                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11759                         uint64_t set_action;
11760                 } action_ctx = { .set_action = 0 };
11761
11762                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11763                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11764                          MLX5_MODIFICATION_TYPE_SET);
11765                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11766                          MLX5_MODI_META_REG_C_0);
11767                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11768                          priv->vport_meta_tag);
11769                 res->set_action = action_ctx.set_action;
11770         } else if (attr->ingress) {
11771                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11772         } else {
11773                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11774         }
11775         return 0;
11776 }
11777
11778 /**
11779  * Convert Sample action to DV specification.
11780  *
11781  * @param[in] dev
11782  *   Pointer to rte_eth_dev structure.
11783  * @param[in, out] dev_flow
11784  *   Pointer to the mlx5_flow.
11785  * @param[in] num_of_dest
11786  *   The number of destinations.
11787  * @param[in, out] res
11788  *   Pointer to sample resource.
11789  * @param[in, out] mdest_res
11790  *   Pointer to destination array resource.
11791  * @param[in] sample_actions
11792  *   Pointer to sample path actions list.
11793  * @param[in] action_flags
11794  *   Holds the actions detected until now.
11795  * @param[out] error
11796  *   Pointer to the error structure.
11797  *
11798  * @return
11799  *   0 on success, a negative errno value otherwise and rte_errno is set.
11800  */
11801 static int
11802 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11803                              struct mlx5_flow *dev_flow,
11804                              uint32_t num_of_dest,
11805                              struct mlx5_flow_dv_sample_resource *res,
11806                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
11807                              void **sample_actions,
11808                              uint64_t action_flags,
11809                              struct rte_flow_error *error)
11810 {
11811         /* Update the normal path action resource into the last index of the array. */
11812         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11813         struct mlx5_flow_sub_actions_list *sample_act =
11814                                         &mdest_res->sample_act[dest_index];
11815         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11816         struct mlx5_flow_rss_desc *rss_desc;
11817         uint32_t normal_idx = 0;
11818         struct mlx5_hrxq *hrxq;
11819         uint32_t hrxq_idx;
11820
11821         MLX5_ASSERT(wks);
11822         rss_desc = &wks->rss_desc;
11823         if (num_of_dest > 1) {
11824                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
11825                         /* Handle QP action for mirroring */
11826                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11827                                                     rss_desc, &hrxq_idx);
11828                         if (!hrxq)
11829                                 return rte_flow_error_set
11830                                      (error, rte_errno,
11831                                       RTE_FLOW_ERROR_TYPE_ACTION,
11832                                       NULL,
11833                                       "cannot create rx queue");
11834                         normal_idx++;
11835                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
11836                         sample_act->dr_queue_action = hrxq->action;
11837                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11838                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11839                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11840                 }
11841                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
11842                         normal_idx++;
11843                         mdest_res->sample_idx[dest_index].rix_encap_decap =
11844                                 dev_flow->handle->dvh.rix_encap_decap;
11845                         sample_act->dr_encap_action =
11846                                 dev_flow->dv.encap_decap->action;
11847                         dev_flow->handle->dvh.rix_encap_decap = 0;
11848                 }
11849                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
11850                         normal_idx++;
11851                         mdest_res->sample_idx[dest_index].rix_port_id_action =
11852                                 dev_flow->handle->rix_port_id_action;
11853                         sample_act->dr_port_id_action =
11854                                 dev_flow->dv.port_id_action->action;
11855                         dev_flow->handle->rix_port_id_action = 0;
11856                 }
11857                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
11858                         normal_idx++;
11859                         mdest_res->sample_idx[dest_index].rix_jump =
11860                                 dev_flow->handle->rix_jump;
11861                         sample_act->dr_jump_action =
11862                                 dev_flow->dv.jump->action;
11863                         dev_flow->handle->rix_jump = 0;
11864                 }
11865                 sample_act->actions_num = normal_idx;
11866                 /* Update the sample action resource into the first index of the array. */
11867                 mdest_res->ft_type = res->ft_type;
11868                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
11869                                 sizeof(struct mlx5_flow_sub_actions_idx));
11870                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
11871                                 sizeof(struct mlx5_flow_sub_actions_list));
11872                 mdest_res->num_of_dest = num_of_dest;
11873                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
11874                                                          dev_flow, error))
11875                         return rte_flow_error_set(error, EINVAL,
11876                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11877                                                   NULL, "can't create sample "
11878                                                   "action");
11879         } else {
11880                 res->sub_actions = sample_actions;
11881                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
11882                         return rte_flow_error_set(error, EINVAL,
11883                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11884                                                   NULL,
11885                                                   "can't create sample action");
11886         }
11887         return 0;
11888 }
11889
11890 /**
11891  * Remove an ASO age action from the age actions list.
11892  *
11893  * @param[in] dev
11894  *   Pointer to the Ethernet device structure.
11895  * @param[in] age
11896  *   Pointer to the ASO age action handle.
11897  */
11898 static void
11899 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11900                                 struct mlx5_aso_age_action *age)
11901 {
11902         struct mlx5_age_info *age_info;
11903         struct mlx5_age_param *age_param = &age->age_params;
11904         struct mlx5_priv *priv = dev->data->dev_private;
11905         uint16_t expected = AGE_CANDIDATE;
11906
11907         age_info = GET_PORT_AGE_INFO(priv);
11908         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11909                                          AGE_FREE, false, __ATOMIC_RELAXED,
11910                                          __ATOMIC_RELAXED)) {
11911                 /*
11912                  * We need the lock even on age timeout,
11913                  * since the age action may still be in process.
11914                  */
11915                 rte_spinlock_lock(&age_info->aged_sl);
11916                 LIST_REMOVE(age, next);
11917                 rte_spinlock_unlock(&age_info->aged_sl);
11918                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11919         }
11920 }
11921
11922 /**
11923  * Release an ASO age action.
11924  *
11925  * @param[in] dev
11926  *   Pointer to the Ethernet device structure.
11927  * @param[in] age_idx
11928  *   Index of ASO age action to release.
11932  *
11933  * @return
11934  *   0 when age action was removed, otherwise the number of references.
11935  */
11936 static int
11937 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11938 {
11939         struct mlx5_priv *priv = dev->data->dev_private;
11940         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11941         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11942         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11943
11944         if (!ret) {
11945                 flow_dv_aso_age_remove_from_age(dev, age);
11946                 rte_spinlock_lock(&mng->free_sl);
11947                 LIST_INSERT_HEAD(&mng->free, age, next);
11948                 rte_spinlock_unlock(&mng->free_sl);
11949         }
11950         return ret;
11951 }
11952
11953 /**
11954  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11955  *
11956  * @param[in] dev
11957  *   Pointer to the Ethernet device structure.
11958  *
11959  * @return
11960  *   0 on success, otherwise negative errno value and rte_errno is set.
11961  */
11962 static int
11963 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11964 {
11965         struct mlx5_priv *priv = dev->data->dev_private;
11966         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11967         void *old_pools = mng->pools;
11968         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11969         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11970         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11971
11972         if (!pools) {
11973                 rte_errno = ENOMEM;
11974                 return -ENOMEM;
11975         }
11976         if (old_pools) {
11977                 memcpy(pools, old_pools,
11978                        mng->n * sizeof(struct mlx5_aso_age_pool *));
11979                 mlx5_free(old_pools);
11980         } else {
11981                 /* First ASO flow hit allocation - starting ASO data-path. */
11982                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11983
11984                 if (ret) {
11985                         mlx5_free(pools);
11986                         return ret;
11987                 }
11988         }
11989         mng->n = resize;
11990         mng->pools = pools;
11991         return 0;
11992 }
11993
11994 /**
11995  * Create and initialize a new ASO aging pool.
11996  *
11997  * @param[in] dev
11998  *   Pointer to the Ethernet device structure.
11999  * @param[out] age_free
12000  *   Where to put the pointer of a new age action.
12001  *
12002  * @return
12003  *   The age actions pool pointer and @p age_free is set on success,
12004  *   NULL otherwise and rte_errno is set.
12005  */
12006 static struct mlx5_aso_age_pool *
12007 flow_dv_age_pool_create(struct rte_eth_dev *dev,
12008                         struct mlx5_aso_age_action **age_free)
12009 {
12010         struct mlx5_priv *priv = dev->data->dev_private;
12011         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12012         struct mlx5_aso_age_pool *pool = NULL;
12013         struct mlx5_devx_obj *obj = NULL;
12014         uint32_t i;
12015
12016         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
12017                                                     priv->sh->cdev->pdn);
12018         if (!obj) {
12019                 rte_errno = ENODATA;
12020                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
12021                 return NULL;
12022         }
12023         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12024         if (!pool) {
12025                 claim_zero(mlx5_devx_cmd_destroy(obj));
12026                 rte_errno = ENOMEM;
12027                 return NULL;
12028         }
12029         pool->flow_hit_aso_obj = obj;
12030         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
12031         rte_rwlock_write_lock(&mng->resize_rwl);
12032         pool->index = mng->next;
12033         /* Resize pools array if there is no room for the new pool in it. */
12034         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
12035                 claim_zero(mlx5_devx_cmd_destroy(obj));
12036                 mlx5_free(pool);
12037                 rte_rwlock_write_unlock(&mng->resize_rwl);
12038                 return NULL;
12039         }
12040         mng->pools[pool->index] = pool;
12041         mng->next++;
12042         rte_rwlock_write_unlock(&mng->resize_rwl);
12043         /* Assign the first action in the new pool, the rest go to free list. */
12044         *age_free = &pool->actions[0];
12045         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
12046                 pool->actions[i].offset = i;
12047                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
12048         }
12049         return pool;
12050 }
12051
12052 /**
12053  * Allocate an ASO aging bit.
12054  *
12055  * @param[in] dev
12056  *   Pointer to the Ethernet device structure.
12057  * @param[out] error
12058  *   Pointer to the error structure.
12059  *
12060  * @return
12061  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
12062  */
12063 static uint32_t
12064 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12065 {
12066         struct mlx5_priv *priv = dev->data->dev_private;
12067         const struct mlx5_aso_age_pool *pool;
12068         struct mlx5_aso_age_action *age_free = NULL;
12069         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12070
12071         MLX5_ASSERT(mng);
12072         /* Try to get the next free age action bit. */
12073         rte_spinlock_lock(&mng->free_sl);
12074         age_free = LIST_FIRST(&mng->free);
12075         if (age_free) {
12076                 LIST_REMOVE(age_free, next);
12077         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
12078                 rte_spinlock_unlock(&mng->free_sl);
12079                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12080                                    NULL, "failed to create ASO age pool");
12081                 return 0; /* 0 is an error. */
12082         }
12083         rte_spinlock_unlock(&mng->free_sl);
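        /*
         * Recover the enclosing pool from the free action: subtracting
         * the action offset from its address gives the start of the
         * pool's actions[] array, which container_of() maps back to the
         * pool itself.
         */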
12084         pool = container_of
12085           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
12086                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
12087                                                                        actions);
12088         if (!age_free->dr_action) {
12089                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
12090                                                  error);
12091
12092                 if (reg_c < 0) {
12093                         rte_flow_error_set(error, rte_errno,
12094                                            RTE_FLOW_ERROR_TYPE_ACTION,
12095                                            NULL, "failed to get reg_c "
12096                                            "for ASO flow hit");
12097                         return 0; /* 0 is an error. */
12098                 }
12099 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
12100                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
12101                                 (priv->sh->rx_domain,
12102                                  pool->flow_hit_aso_obj->obj, age_free->offset,
12103                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
12104                                  (reg_c - REG_C_0));
12105 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
12106                 if (!age_free->dr_action) {
12107                         rte_errno = errno;
12108                         rte_spinlock_lock(&mng->free_sl);
12109                         LIST_INSERT_HEAD(&mng->free, age_free, next);
12110                         rte_spinlock_unlock(&mng->free_sl);
12111                         rte_flow_error_set(error, rte_errno,
12112                                            RTE_FLOW_ERROR_TYPE_ACTION,
12113                                            NULL, "failed to create ASO "
12114                                            "flow hit action");
12115                         return 0; /* 0 is an error. */
12116                 }
12117         }
12118         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
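        /*
         * Compose the returned handle: pool index in the lower 16 bits,
         * (offset + 1) in the upper bits, so a valid handle is never 0
         * (0 is the error value). E.g. pool 3, offset 0 -> 0x10003,
         * decoded back by flow_aso_age_get_by_idx().
         */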
12119         return pool->index | ((age_free->offset + 1) << 16);
12120 }
12121
12122 /**
12123  * Initialize flow ASO age parameters.
12124  *
12125  * @param[in] dev
12126  *   Pointer to rte_eth_dev structure.
12127  * @param[in] age_idx
12128  *   Index of ASO age action.
12129  * @param[in] context
12130  *   Pointer to flow counter age context.
12131  * @param[in] timeout
12132  *   Aging timeout in seconds.
12133  *
12134  */
12135 static void
12136 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
12137                             uint32_t age_idx,
12138                             void *context,
12139                             uint32_t timeout)
12140 {
12141         struct mlx5_aso_age_action *aso_age;
12142
12143         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
12144         MLX5_ASSERT(aso_age);
12145         aso_age->age_params.context = context;
12146         aso_age->age_params.timeout = timeout;
12147         aso_age->age_params.port_id = dev->data->port_id;
12148         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
12149                          __ATOMIC_RELAXED);
12150         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
12151                          __ATOMIC_RELAXED);
12152 }
12153
12154 static void
12155 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
12156                                const struct rte_flow_item_integrity *value,
12157                                void *headers_m, void *headers_v)
12158 {
12159         if (mask->l4_ok) {
12160                 /* RTE l4_ok filter aggregates hardware l4_ok and
12161                  * l4_checksum_ok filters.
12162                  * Positive RTE l4_ok match requires hardware match on both L4
12163                  * hardware integrity bits.
12164                  * For negative match, check hardware l4_checksum_ok bit only,
12165                  * because hardware sets that bit to 0 for all packets
12166                  * with bad L4.
12167                  */
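                /*
                 * E.g. matching l4_ok == 1 programs both l4_ok and
                 * l4_checksum_ok to 1, while matching l4_ok == 0
                 * programs only l4_checksum_ok == 0.
                 */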
12168                 if (value->l4_ok) {
12169                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1);
12170                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1);
12171                 }
12172                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12173                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12174                          !!value->l4_ok);
12175         }
12176         if (mask->l4_csum_ok) {
12177                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12178                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12179                          value->l4_csum_ok);
12180         }
12181 }
12182
12183 static void
12184 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
12185                                const struct rte_flow_item_integrity *value,
12186                                void *headers_m, void *headers_v, bool is_ipv4)
12187 {
12188         if (mask->l3_ok) {
12189                 /* RTE l3_ok filter aggregates for IPv4 hardware l3_ok and
12190                  * ipv4_csum_ok filters.
12191                  * Positive RTE l3_ok match requires hardware match on both L3
12192                  * hardware integrity bits.
12193                  * For negative match, check hardware l3_csum_ok bit only,
12194                  * because hardware sets that bit to 0 for all packets
12195                  * with bad L3.
12196                  */
12197                 if (is_ipv4) {
12198                         if (value->l3_ok) {
12199                                 MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12200                                          l3_ok, 1);
12201                                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12202                                          l3_ok, 1);
12203                         }
12204                         MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12205                                  ipv4_checksum_ok, 1);
12206                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12207                                  ipv4_checksum_ok, !!value->l3_ok);
12208                 } else {
12209                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1);
12210                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
12211                                  value->l3_ok);
12212                 }
12213         }
12214         if (mask->ipv4_csum_ok) {
12215                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1);
12216                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
12217                          value->ipv4_csum_ok);
12218         }
12219 }
12220
12221 static void
12222 set_integrity_bits(void *headers_m, void *headers_v,
12223                    const struct rte_flow_item *integrity_item, bool is_l3_ip4)
12224 {
12225         const struct rte_flow_item_integrity *spec = integrity_item->spec;
12226         const struct rte_flow_item_integrity *mask = integrity_item->mask;
12227
12228         /* Integrity item validation has already ensured a non-NULL spec. */
12229         MLX5_ASSERT(spec != NULL);
12230         if (!mask)
12231                 mask = &rte_flow_item_integrity_mask;
12232         flow_dv_translate_integrity_l3(mask, spec, headers_m, headers_v,
12233                                        is_l3_ip4);
12234         flow_dv_translate_integrity_l4(mask, spec, headers_m, headers_v);
12235 }
12236
12237 static void
12238 flow_dv_translate_item_integrity_post(void *matcher, void *key,
12239                                       const
12240                                       struct rte_flow_item *integrity_items[2],
12241                                       uint64_t pattern_flags)
12242 {
12243         void *headers_m, *headers_v;
12244         bool is_l3_ip4;
12245
12246         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
12247                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12248                                          inner_headers);
12249                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
12250                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) !=
12251                             0;
12252                 set_integrity_bits(headers_m, headers_v,
12253                                    integrity_items[1], is_l3_ip4);
12254         }
12255         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
12256                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12257                                          outer_headers);
12258                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
12259                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) !=
12260                             0;
12261                 set_integrity_bits(headers_m, headers_v,
12262                                    integrity_items[0], is_l3_ip4);
12263         }
12264 }
12265
12266 static void
12267 flow_dv_translate_item_integrity(const struct rte_flow_item *item,
12268                                  const struct rte_flow_item *integrity_items[2],
12269                                  uint64_t *last_item)
12270 {
12271         const struct rte_flow_item_integrity *spec = (typeof(spec))item->spec;
12272
12273         /* Integrity item validation has already ensured a non-NULL spec. */
12274         MLX5_ASSERT(spec != NULL);
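        /* Levels 0 and 1 select the outermost headers; level 2 and above select the inner headers. */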
12275         if (spec->level > 1) {
12276                 integrity_items[1] = item;
12277                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
12278         } else {
12279                 integrity_items[0] = item;
12280                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
12281         }
12282 }
12283
12284 /**
12285  * Prepares DV flow counter with aging configuration.
12286  * Gets an existing counter by index if there is one, otherwise creates a new one.
12287  *
12288  * @param[in] dev
12289  *   Pointer to rte_eth_dev structure.
12290  * @param[in] dev_flow
12291  *   Pointer to the mlx5_flow.
12292  * @param[in, out] flow
12293  *   Pointer to the sub flow.
12294  * @param[in] count
12295  *   Pointer to the counter action configuration.
12296  * @param[in] age
12297  *   Pointer to the aging action configuration.
12298  * @param[out] error
12299  *   Pointer to the error structure.
12300  *
12301  * @return
12302  *   Pointer to the counter, NULL otherwise.
12303  */
12304 static struct mlx5_flow_counter *
12305 flow_dv_prepare_counter(struct rte_eth_dev *dev,
12306                         struct mlx5_flow *dev_flow,
12307                         struct rte_flow *flow,
12308                         const struct rte_flow_action_count *count,
12309                         const struct rte_flow_action_age *age,
12310                         struct rte_flow_error *error)
12311 {
12312         if (!flow->counter) {
12313                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
12314                                                                  count, age);
12315                 if (!flow->counter) {
12316                         rte_flow_error_set(error, rte_errno,
12317                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12318                                            "cannot create counter object.");
12319                         return NULL;
12320                 }
12321         }
12322         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12323 }
12324
12325 /**
12326  * Release an ASO CT action via its owner device.
12327  *
12328  * @param[in] dev
12329  *   Pointer to the Ethernet device structure.
12330  * @param[in] idx
12331  *   Index of ASO CT action to release.
12332  *
12333  * @return
12334  *   0 when CT action was removed, otherwise the number of references.
12335  */
12336 static inline int
12337 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
12338 {
12339         struct mlx5_priv *priv = dev->data->dev_private;
12340         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12341         uint32_t ret;
12342         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12343         enum mlx5_aso_ct_state state =
12344                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
12345
12346         /* Cannot release when CT is in the ASO SQ. */
12347         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
12348                 return -1;
12349         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
12350         if (!ret) {
12351                 if (ct->dr_action_orig) {
12352 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12353                         claim_zero(mlx5_glue->destroy_flow_action
12354                                         (ct->dr_action_orig));
12355 #endif
12356                         ct->dr_action_orig = NULL;
12357                 }
12358                 if (ct->dr_action_rply) {
12359 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12360                         claim_zero(mlx5_glue->destroy_flow_action
12361                                         (ct->dr_action_rply));
12362 #endif
12363                         ct->dr_action_rply = NULL;
12364                 }
12365                 /* Clear the state to free, no need in 1st allocation. */
12366                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
12367                 rte_spinlock_lock(&mng->ct_sl);
12368                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
12369                 rte_spinlock_unlock(&mng->ct_sl);
12370         }
12371         return (int)ret;
12372 }
12373
12374 static inline int
12375 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx,
12376                        struct rte_flow_error *error)
12377 {
12378         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12379         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12380         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12381         int ret;
12382
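        /*
         * The indirect CT handle encodes the owner port in its upper
         * bits; the release must go through the owner device that
         * allocated the action.
         */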
12383         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12384         if (dev->data->dev_started != 1)
12385                 return rte_flow_error_set(error, EAGAIN,
12386                                           RTE_FLOW_ERROR_TYPE_ACTION,
12387                                           NULL,
12388                                           "Indirect CT action cannot be destroyed when the port is stopped");
12389         ret = flow_dv_aso_ct_dev_release(owndev, idx);
12390         if (ret < 0)
12391                 return rte_flow_error_set(error, EAGAIN,
12392                                           RTE_FLOW_ERROR_TYPE_ACTION,
12393                                           NULL,
12394                                           "Current state prevents indirect CT action from being destroyed");
12395         return ret;
12396 }
12397
12398 /**
12399  * Resize the ASO CT pools array by 64 pools.
12400  *
12401  * @param[in] dev
12402  *   Pointer to the Ethernet device structure.
12403  *
12404  * @return
12405  *   0 on success, otherwise negative errno value and rte_errno is set.
12406  */
12407 static int
12408 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12409 {
12410         struct mlx5_priv *priv = dev->data->dev_private;
12411         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12412         void *old_pools = mng->pools;
12413         /* Magic number for now, needs a dedicated macro. */
12414         uint32_t resize = mng->n + 64;
12415         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12416         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12417
12418         if (!pools) {
12419                 rte_errno = ENOMEM;
12420                 return -rte_errno;
12421         }
12422         rte_rwlock_write_lock(&mng->resize_rwl);
12423         /* ASO SQ/QP was already initialized during startup. */
12424         if (old_pools) {
12425                 /* Realloc could be an alternative choice. */
12426                 rte_memcpy(pools, old_pools,
12427                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
12428                 mlx5_free(old_pools);
12429         }
12430         mng->n = resize;
12431         mng->pools = pools;
12432         rte_rwlock_write_unlock(&mng->resize_rwl);
12433         return 0;
12434 }
12435
12436 /**
12437  * Create and initialize a new ASO CT pool.
12438  *
12439  * @param[in] dev
12440  *   Pointer to the Ethernet device structure.
12441  * @param[out] ct_free
12442  *   Where to put the pointer of a new CT action.
12443  *
12444  * @return
12445  *   The CT actions pool pointer and @p ct_free is set on success,
12446  *   NULL otherwise and rte_errno is set.
12447  */
12448 static struct mlx5_aso_ct_pool *
12449 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
12450                        struct mlx5_aso_ct_action **ct_free)
12451 {
12452         struct mlx5_priv *priv = dev->data->dev_private;
12453         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12454         struct mlx5_aso_ct_pool *pool = NULL;
12455         struct mlx5_devx_obj *obj = NULL;
12456         uint32_t i;
12457         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
12458
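        /*
         * One DevX object backs the whole pool: it is created in bulk
         * with log2(MLX5_ASO_CT_ACTIONS_PER_POOL) entries.
         */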
12459         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
12460                                                           priv->sh->cdev->pdn,
12461                                                           log_obj_size);
12462         if (!obj) {
12463                 rte_errno = ENODATA;
12464                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
12465                 return NULL;
12466         }
12467         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12468         if (!pool) {
12469                 rte_errno = ENOMEM;
12470                 claim_zero(mlx5_devx_cmd_destroy(obj));
12471                 return NULL;
12472         }
12473         pool->devx_obj = obj;
12474         pool->index = mng->next;
12475         /* Resize pools array if there is no room for the new pool in it. */
12476         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
12477                 claim_zero(mlx5_devx_cmd_destroy(obj));
12478                 mlx5_free(pool);
12479                 return NULL;
12480         }
12481         mng->pools[pool->index] = pool;
12482         mng->next++;
12483         /* Assign the first action in the new pool, the rest go to free list. */
12484         *ct_free = &pool->actions[0];
12485         /* The caller holds the lock, so the list operation is safe here. */
12486         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
12487                 /* refcnt is 0 when allocating the memory. */
12488                 pool->actions[i].offset = i;
12489                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
12490         }
12491         return pool;
12492 }
12493
12494 /**
12495  * Allocate an ASO CT action from the free list.
12496  *
12497  * @param[in] dev
12498  *   Pointer to the Ethernet device structure.
12499  * @param[out] error
12500  *   Pointer to the error structure.
12501  *
12502  * @return
12503  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
12504  */
12505 static uint32_t
12506 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12507 {
12508         struct mlx5_priv *priv = dev->data->dev_private;
12509         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12510         struct mlx5_aso_ct_action *ct = NULL;
12511         struct mlx5_aso_ct_pool *pool;
12512         uint8_t reg_c;
12513         uint32_t ct_idx;
12514
12515         MLX5_ASSERT(mng);
12516         if (!priv->sh->cdev->config.devx) {
12517                 rte_errno = ENOTSUP;
12518                 return 0;
12519         }
12520         /* Get a free CT action; if there is none, a new pool will be created. */
12521         rte_spinlock_lock(&mng->ct_sl);
12522         ct = LIST_FIRST(&mng->free_cts);
12523         if (ct) {
12524                 LIST_REMOVE(ct, next);
12525         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
12526                 rte_spinlock_unlock(&mng->ct_sl);
12527                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12528                                    NULL, "failed to create ASO CT pool");
12529                 return 0;
12530         }
12531         rte_spinlock_unlock(&mng->ct_sl);
12532         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
12533         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
12534         /* 0: inactive, 1: created, 2+: used by flows. */
12535         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
12536         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
12537         if (!ct->dr_action_orig) {
12538 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12539                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
12540                         (priv->sh->rx_domain, pool->devx_obj->obj,
12541                          ct->offset,
12542                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
12543                          reg_c - REG_C_0);
12544 #else
12545                 RTE_SET_USED(reg_c);
12546 #endif
12547                 if (!ct->dr_action_orig) {
12548                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12549                         rte_flow_error_set(error, rte_errno,
12550                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12551                                            "failed to create ASO CT action");
12552                         return 0;
12553                 }
12554         }
12555         if (!ct->dr_action_rply) {
12556 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12557                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
12558                         (priv->sh->rx_domain, pool->devx_obj->obj,
12559                          ct->offset,
12560                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
12561                          reg_c - REG_C_0);
12562 #endif
12563                 if (!ct->dr_action_rply) {
12564                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12565                         rte_flow_error_set(error, rte_errno,
12566                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12567                                            "failed to create ASO CT action");
12568                         return 0;
12569                 }
12570         }
12571         return ct_idx;
12572 }
12573
12574 /**
12575  * Create a conntrack object with context and actions by using ASO mechanism.
12576  *
12577  * @param[in] dev
12578  *   Pointer to rte_eth_dev structure.
12579  * @param[in] pro
12580  *   Pointer to conntrack information profile.
12581  * @param[out] error
12582  *   Pointer to the error structure.
12583  *
12584  * @return
12585  *   Index to conntrack object on success, 0 otherwise.
12586  */
12587 static uint32_t
12588 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12589                                    const struct rte_flow_action_conntrack *pro,
12590                                    struct rte_flow_error *error)
12591 {
12592         struct mlx5_priv *priv = dev->data->dev_private;
12593         struct mlx5_dev_ctx_shared *sh = priv->sh;
12594         struct mlx5_aso_ct_action *ct;
12595         uint32_t idx;
12596
12597         if (!sh->ct_aso_en)
12598                 return rte_flow_error_set(error, ENOTSUP,
12599                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12600                                           "Connection tracking is not supported");
12601         idx = flow_dv_aso_ct_alloc(dev, error);
12602         if (!idx)
12603                 return rte_flow_error_set(error, rte_errno,
12604                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12605                                           "Failed to allocate CT object");
12606         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12607         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12608                 return rte_flow_error_set(error, EBUSY,
12609                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12610                                           "Failed to update CT");
12611         ct->is_original = !!pro->is_original_dir;
12612         ct->peer = pro->peer_port;
12613         return idx;
12614 }
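/*
 * Note: the returned index is only the per-device part; the indirect
 * action handle seen by the application also carries the owner port
 * (see the MLX5_INDIRECT_ACT_CT_GET_OWNER usage in
 * flow_dv_aso_ct_release() above).
 */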
12615
12616 /**
12617  * Fill the flow with DV spec, lock free
12618  * (the mutex should be acquired by the caller).
12619  *
12620  * @param[in] dev
12621  *   Pointer to rte_eth_dev structure.
12622  * @param[in, out] dev_flow
12623  *   Pointer to the sub flow.
12624  * @param[in] attr
12625  *   Pointer to the flow attributes.
12626  * @param[in] items
12627  *   Pointer to the list of items.
12628  * @param[in] actions
12629  *   Pointer to the list of actions.
12630  * @param[out] error
12631  *   Pointer to the error structure.
12632  *
12633  * @return
12634  *   0 on success, a negative errno value otherwise and rte_errno is set.
12635  */
12636 static int
12637 flow_dv_translate(struct rte_eth_dev *dev,
12638                   struct mlx5_flow *dev_flow,
12639                   const struct rte_flow_attr *attr,
12640                   const struct rte_flow_item items[],
12641                   const struct rte_flow_action actions[],
12642                   struct rte_flow_error *error)
12643 {
12644         struct mlx5_priv *priv = dev->data->dev_private;
12645         struct mlx5_sh_config *dev_conf = &priv->sh->config;
12646         struct rte_flow *flow = dev_flow->flow;
12647         struct mlx5_flow_handle *handle = dev_flow->handle;
12648         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12649         struct mlx5_flow_rss_desc *rss_desc;
12650         uint64_t item_flags = 0;
12651         uint64_t last_item = 0;
12652         uint64_t action_flags = 0;
12653         struct mlx5_flow_dv_matcher matcher = {
12654                 .mask = {
12655                         .size = sizeof(matcher.mask.buf),
12656                 },
12657         };
12658         int actions_n = 0;
12659         bool actions_end = false;
12660         union {
12661                 struct mlx5_flow_dv_modify_hdr_resource res;
12662                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12663                             sizeof(struct mlx5_modification_cmd) *
12664                             (MLX5_MAX_MODIFY_NUM + 1)];
12665         } mhdr_dummy;
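        /*
         * The union above reserves stack room for the modify header
         * resource plus up to MLX5_MAX_MODIFY_NUM + 1 modification
         * commands appended behind it.
         */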
12666         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12667         const struct rte_flow_action_count *count = NULL;
12668         const struct rte_flow_action_age *non_shared_age = NULL;
12669         union flow_dv_attr flow_attr = { .attr = 0 };
12670         uint32_t tag_be;
12671         union mlx5_flow_tbl_key tbl_key;
12672         uint32_t modify_action_position = UINT32_MAX;
12673         void *match_mask = matcher.mask.buf;
12674         void *match_value = dev_flow->dv.value.buf;
12675         uint8_t next_protocol = 0xff;
12676         struct rte_vlan_hdr vlan = { 0 };
12677         struct mlx5_flow_dv_dest_array_resource mdest_res;
12678         struct mlx5_flow_dv_sample_resource sample_res;
12679         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12680         const struct rte_flow_action_sample *sample = NULL;
12681         struct mlx5_flow_sub_actions_list *sample_act;
12682         uint32_t sample_act_pos = UINT32_MAX;
12683         uint32_t age_act_pos = UINT32_MAX;
12684         uint32_t num_of_dest = 0;
12685         int tmp_actions_n = 0;
12686         uint32_t table;
12687         int ret = 0;
12688         const struct mlx5_flow_tunnel *tunnel = NULL;
12689         struct flow_grp_info grp_info = {
12690                 .external = !!dev_flow->external,
12691                 .transfer = !!attr->transfer,
12692                 .fdb_def_rule = !!priv->fdb_def_rule,
12693                 .skip_scale = dev_flow->skip_scale &
12694                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12695                 .std_tbl_fix = true,
12696         };
12697         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
12698         const struct rte_flow_item *tunnel_item = NULL;
12699
12700         if (!wks)
12701                 return rte_flow_error_set(error, ENOMEM,
12702                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12703                                           NULL,
12704                                           "failed to push flow workspace");
12705         rss_desc = &wks->rss_desc;
12706         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12707         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12708         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12709                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12710         /* update normal path action resource into last index of array */
12711         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12712         if (is_tunnel_offload_active(dev)) {
12713                 if (dev_flow->tunnel) {
12714                         RTE_VERIFY(dev_flow->tof_type ==
12715                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12716                         tunnel = dev_flow->tunnel;
12717                 } else {
12718                         tunnel = mlx5_get_tof(items, actions,
12719                                               &dev_flow->tof_type);
12720                         dev_flow->tunnel = tunnel;
12721                 }
12722                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12723                                         (dev, attr, tunnel, dev_flow->tof_type);
12724         }
12727         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12728                                        &grp_info, error);
12729         if (ret)
12730                 return ret;
12731         dev_flow->dv.group = table;
12732         if (attr->transfer)
12733                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12734         /* The number of actions must be 0 in case of a dirty stack. */
12735         mhdr_res->actions_num = 0;
12736         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12737                 /*
12738                  * Do not add a decap action if the match rule drops the
12739                  * packet: HW rejects rules that combine decap & drop.
12740                  *
12741                  * If the tunnel match rule was inserted before the matching
12742                  * tunnel set rule, the flow table used in the match rule must
12743                  * be registered. The current implementation handles that in
12744                  * flow_dv_match_register() at the end of the function.
12745                  */
12746                 bool add_decap = true;
12747                 const struct rte_flow_action *ptr = actions;
12748
12749                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12750                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12751                                 add_decap = false;
12752                                 break;
12753                         }
12754                 }
12755                 if (add_decap) {
12756                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12757                                                            attr->transfer,
12758                                                            error))
12759                                 return -rte_errno;
12760                         dev_flow->dv.actions[actions_n++] =
12761                                         dev_flow->dv.encap_decap->action;
12762                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12763                 }
12764         }
12765         for (; !actions_end ; actions++) {
12766                 const struct rte_flow_action_queue *queue;
12767                 const struct rte_flow_action_rss *rss;
12768                 const struct rte_flow_action *action = actions;
12769                 const uint8_t *rss_key;
12770                 struct mlx5_flow_tbl_resource *tbl;
12771                 struct mlx5_aso_age_action *age_act;
12772                 struct mlx5_flow_counter *cnt_act;
12773                 uint32_t port_id = 0;
12774                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12775                 int action_type = actions->type;
12776                 const struct rte_flow_action *found_action = NULL;
12777                 uint32_t jump_group = 0;
12778                 uint32_t owner_idx;
12779                 struct mlx5_aso_ct_action *ct;
12780
12781                 if (!mlx5_flow_os_action_supported(action_type))
12782                         return rte_flow_error_set(error, ENOTSUP,
12783                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12784                                                   actions,
12785                                                   "action not supported");
12786                 switch (action_type) {
12787                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12788                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12789                         break;
12790                 case RTE_FLOW_ACTION_TYPE_VOID:
12791                         break;
12792                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12793                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
12794                         if (flow_dv_translate_action_port_id(dev, action,
12795                                                              &port_id, error))
12796                                 return -rte_errno;
12797                         port_id_resource.port_id = port_id;
12798                         MLX5_ASSERT(!handle->rix_port_id_action);
12799                         if (flow_dv_port_id_action_resource_register
12800                             (dev, &port_id_resource, dev_flow, error))
12801                                 return -rte_errno;
12802                         dev_flow->dv.actions[actions_n++] =
12803                                         dev_flow->dv.port_id_action->action;
12804                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12805                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12806                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12807                         num_of_dest++;
12808                         break;
12809                 case RTE_FLOW_ACTION_TYPE_FLAG:
12810                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12811                         wks->mark = 1;
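                        /*
                         * With extended metadata (dv_xmeta_en != LEGACY) the
                         * flag is delivered through a modify-header write to a
                         * metadata register; in legacy mode a tag action with
                         * MLX5_FLOW_MARK_DEFAULT is registered instead.
                         */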
12812                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12813                                 struct rte_flow_action_mark mark = {
12814                                         .id = MLX5_FLOW_MARK_DEFAULT,
12815                                 };
12816
12817                                 if (flow_dv_convert_action_mark(dev, &mark,
12818                                                                 mhdr_res,
12819                                                                 error))
12820                                         return -rte_errno;
12821                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12822                                 break;
12823                         }
12824                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12825                         /*
12826                          * Only one FLAG or MARK is supported per device flow
12827                          * right now, so the pointer to the tag resource must
12828                          * be zero before the registration.
12829                          */
12830                         MLX5_ASSERT(!handle->dvh.rix_tag);
12831                         if (flow_dv_tag_resource_register(dev, tag_be,
12832                                                           dev_flow, error))
12833                                 return -rte_errno;
12834                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12835                         dev_flow->dv.actions[actions_n++] =
12836                                         dev_flow->dv.tag_resource->action;
12837                         break;
12838                 case RTE_FLOW_ACTION_TYPE_MARK:
12839                         action_flags |= MLX5_FLOW_ACTION_MARK;
12840                         wks->mark = 1;
12841                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12842                                 const struct rte_flow_action_mark *mark =
12843                                         (const struct rte_flow_action_mark *)
12844                                                 actions->conf;
12845
12846                                 if (flow_dv_convert_action_mark(dev, mark,
12847                                                                 mhdr_res,
12848                                                                 error))
12849                                         return -rte_errno;
12850                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12851                                 break;
12852                         }
12853                         /* Fall-through */
12854                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12855                         /* Legacy (non-extensive) MARK action. */
12856                         tag_be = mlx5_flow_mark_set
12857                               (((const struct rte_flow_action_mark *)
12858                                (actions->conf))->id);
12859                         MLX5_ASSERT(!handle->dvh.rix_tag);
12860                         if (flow_dv_tag_resource_register(dev, tag_be,
12861                                                           dev_flow, error))
12862                                 return -rte_errno;
12863                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12864                         dev_flow->dv.actions[actions_n++] =
12865                                         dev_flow->dv.tag_resource->action;
12866                         break;
12867                 case RTE_FLOW_ACTION_TYPE_SET_META:
12868                         if (flow_dv_convert_action_set_meta
12869                                 (dev, mhdr_res, attr,
12870                                  (const struct rte_flow_action_set_meta *)
12871                                   actions->conf, error))
12872                                 return -rte_errno;
12873                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12874                         break;
12875                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12876                         if (flow_dv_convert_action_set_tag
12877                                 (dev, mhdr_res,
12878                                  (const struct rte_flow_action_set_tag *)
12879                                   actions->conf, error))
12880                                 return -rte_errno;
12881                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12882                         break;
12883                 case RTE_FLOW_ACTION_TYPE_DROP:
12884                         action_flags |= MLX5_FLOW_ACTION_DROP;
12885                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12886                         break;
12887                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12888                         queue = actions->conf;
12889                         rss_desc->queue_num = 1;
12890                         rss_desc->queue[0] = queue->index;
12891                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12892                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12893                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12894                         num_of_dest++;
12895                         break;
12896                 case RTE_FLOW_ACTION_TYPE_RSS:
12897                         rss = actions->conf;
12898                         memcpy(rss_desc->queue, rss->queue,
12899                                rss->queue_num * sizeof(uint16_t));
12900                         rss_desc->queue_num = rss->queue_num;
12901                         /* NULL RSS key indicates default RSS key. */
12902                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12903                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12904                         /*
12905                          * rss->level and rss->types should be set in advance
12906                          * when expanding items for RSS.
12907                          */
12908                         action_flags |= MLX5_FLOW_ACTION_RSS;
12909                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12910                                 MLX5_FLOW_FATE_SHARED_RSS :
12911                                 MLX5_FLOW_FATE_QUEUE;
12912                         break;
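		/*
		 * Only the queue list and key are recorded in rss_desc here;
		 * the hash RX queue objects themselves are resolved later,
		 * on the apply path, according to the fate action selected
		 * above.
		 */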
		case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
			owner_idx = (uint32_t)(uintptr_t)action->conf;
			age_act = flow_aso_age_get_by_idx(dev, owner_idx);
			if (flow->age == 0) {
				flow->age = owner_idx;
				__atomic_fetch_add(&age_act->refcnt, 1,
						   __ATOMIC_RELAXED);
			}
			age_act_pos = actions_n++;
			action_flags |= MLX5_FLOW_ACTION_AGE;
			break;
		case RTE_FLOW_ACTION_TYPE_AGE:
			non_shared_age = action->conf;
			age_act_pos = actions_n++;
			action_flags |= MLX5_FLOW_ACTION_AGE;
			break;
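		/*
		 * For both AGE flavors only a slot (age_act_pos) is reserved
		 * in dev_flow->dv.actions[] at this point. The slot is filled
		 * in the RTE_FLOW_ACTION_TYPE_END handler, where either an
		 * ASO age action or a counter-based age action is selected.
		 */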
		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
			owner_idx = (uint32_t)(uintptr_t)action->conf;
			cnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,
							     NULL);
			MLX5_ASSERT(cnt_act != NULL);
			/*
			 * When creating a meter drop flow in the drop table,
			 * the counter should not overwrite the rte flow
			 * counter.
			 */
			if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
			    dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
				dev_flow->dv.actions[actions_n++] =
							cnt_act->action;
			} else {
				if (flow->counter == 0) {
					flow->counter = owner_idx;
					__atomic_fetch_add
						(&cnt_act->shared_info.refcnt,
						 1, __ATOMIC_RELAXED);
				}
				/* Save information first, will apply later. */
				action_flags |= MLX5_FLOW_ACTION_COUNT;
			}
			break;
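		/*
		 * Shared counter ownership: the first sub-flow that sees the
		 * indirect COUNT action records owner_idx in flow->counter
		 * and takes a reference; subsequent sub-flows reuse the same
		 * counter without bumping the refcount again.
		 */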
		case RTE_FLOW_ACTION_TYPE_COUNT:
			if (!priv->sh->cdev->config.devx) {
				return rte_flow_error_set
					      (error, ENOTSUP,
					       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					       NULL,
					       "count action not supported");
			}
			/* Save information first, will apply later. */
			count = action->conf;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
			dev_flow->dv.actions[actions_n++] =
						priv->sh->pop_vlan_action;
			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
			if (!(action_flags &
			      MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
				flow_dev_get_vlan_info_from_items(items, &vlan);
			vlan.eth_proto = rte_be_to_cpu_16
			     ((((const struct rte_flow_action_of_push_vlan *)
						   actions->conf)->ethertype));
			found_action = mlx5_flow_find_action
					(actions + 1,
					 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
			if (found_action)
				mlx5_update_vlan_vid_pcp(found_action, &vlan);
			found_action = mlx5_flow_find_action
					(actions + 1,
					 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
			if (found_action)
				mlx5_update_vlan_vid_pcp(found_action, &vlan);
			if (flow_dv_create_action_push_vlan
					    (dev, attr, &vlan, dev_flow, error))
				return -rte_errno;
			dev_flow->dv.actions[actions_n++] =
					dev_flow->dv.push_vlan_res->action;
			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
			/* The OF_PUSH_VLAN action already handled this one. */
			MLX5_ASSERT(action_flags &
				    MLX5_FLOW_ACTION_OF_PUSH_VLAN);
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
			if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
				break;
			flow_dev_get_vlan_info_from_items(items, &vlan);
			mlx5_update_vlan_vid_pcp(actions, &vlan);
			/* Without a VLAN push, this is a modify header action. */
			if (flow_dv_convert_action_modify_vlan_vid
						(mhdr_res, actions, error))
				return -rte_errno;
			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			if (flow_dv_create_action_l2_encap(dev, actions,
							   dev_flow,
							   attr->transfer,
							   error))
				return -rte_errno;
			dev_flow->dv.actions[actions_n++] =
					dev_flow->dv.encap_decap->action;
			action_flags |= MLX5_FLOW_ACTION_ENCAP;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				sample_act->action_flags |=
							MLX5_FLOW_ACTION_ENCAP;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			if (flow_dv_create_action_l2_decap(dev, dev_flow,
							   attr->transfer,
							   error))
				return -rte_errno;
			dev_flow->dv.actions[actions_n++] =
					dev_flow->dv.encap_decap->action;
			action_flags |= MLX5_FLOW_ACTION_DECAP;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			/* Handle encap with preceding decap. */
			if (action_flags & MLX5_FLOW_ACTION_DECAP) {
				if (flow_dv_create_action_raw_encap
					(dev, actions, dev_flow, attr, error))
					return -rte_errno;
				dev_flow->dv.actions[actions_n++] =
					dev_flow->dv.encap_decap->action;
			} else {
				/* Handle encap without preceding decap. */
				if (flow_dv_create_action_l2_encap
				    (dev, actions, dev_flow, attr->transfer,
				     error))
					return -rte_errno;
				dev_flow->dv.actions[actions_n++] =
					dev_flow->dv.encap_decap->action;
			}
			action_flags |= MLX5_FLOW_ACTION_ENCAP;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				sample_act->action_flags |=
							MLX5_FLOW_ACTION_ENCAP;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
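			/*
			 * Peek at the next non-VOID action: a RAW_DECAP that
			 * is immediately followed by RAW_ENCAP is a header
			 * rewrite and is handled entirely by the encap case,
			 * so no standalone L2 decap action is created here.
			 */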
			while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
				;
			if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
				if (flow_dv_create_action_l2_decap
				    (dev, dev_flow, attr->transfer, error))
					return -rte_errno;
				dev_flow->dv.actions[actions_n++] =
					dev_flow->dv.encap_decap->action;
			}
			/* If decap is followed by encap, handle it at encap. */
			action_flags |= MLX5_FLOW_ACTION_DECAP;
			break;
		case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
			dev_flow->dv.actions[actions_n++] =
				(void *)(uintptr_t)action->conf;
			action_flags |= MLX5_FLOW_ACTION_JUMP;
			break;
		case RTE_FLOW_ACTION_TYPE_JUMP:
			jump_group = ((const struct rte_flow_action_jump *)
							action->conf)->group;
			grp_info.std_tbl_fix = 0;
			if (dev_flow->skip_scale &
				(1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
				grp_info.skip_scale = 1;
			else
				grp_info.skip_scale = 0;
			ret = mlx5_flow_group_to_table(dev, tunnel,
						       jump_group,
						       &table,
						       &grp_info, error);
			if (ret)
				return ret;
			tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
						       attr->transfer,
						       !!dev_flow->external,
						       tunnel, jump_group, 0,
						       0, error);
			if (!tbl)
				return rte_flow_error_set
						(error, errno,
						 RTE_FLOW_ERROR_TYPE_ACTION,
						 NULL,
						 "cannot create jump action.");
			if (flow_dv_jump_tbl_resource_register
			    (dev, tbl, dev_flow, error)) {
				flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
				return rte_flow_error_set
						(error, errno,
						 RTE_FLOW_ERROR_TYPE_ACTION,
						 NULL,
						 "cannot create jump action.");
			}
			dev_flow->dv.actions[actions_n++] =
					dev_flow->dv.jump->action;
			action_flags |= MLX5_FLOW_ACTION_JUMP;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
			sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
			num_of_dest++;
			break;
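		/*
		 * On successful registration the jump resource takes over the
		 * reference held on the destination table; the explicit
		 * flow_dv_tbl_resource_release() above is needed only on the
		 * registration failure path.
		 */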
		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
			if (flow_dv_convert_action_modify_mac
					(mhdr_res, actions, error))
				return -rte_errno;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
					MLX5_FLOW_ACTION_SET_MAC_SRC :
					MLX5_FLOW_ACTION_SET_MAC_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			if (flow_dv_convert_action_modify_ipv4
					(mhdr_res, actions, error))
				return -rte_errno;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
					MLX5_FLOW_ACTION_SET_IPV4_SRC :
					MLX5_FLOW_ACTION_SET_IPV4_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			if (flow_dv_convert_action_modify_ipv6
					(mhdr_res, actions, error))
				return -rte_errno;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
					MLX5_FLOW_ACTION_SET_IPV6_SRC :
					MLX5_FLOW_ACTION_SET_IPV6_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
			if (flow_dv_convert_action_modify_tp
					(mhdr_res, actions, items,
					 &flow_attr, dev_flow, !!(action_flags &
					 MLX5_FLOW_ACTION_DECAP), error))
				return -rte_errno;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
					MLX5_FLOW_ACTION_SET_TP_SRC :
					MLX5_FLOW_ACTION_SET_TP_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
			if (flow_dv_convert_action_modify_dec_ttl
					(mhdr_res, items, &flow_attr, dev_flow,
					 !!(action_flags &
					 MLX5_FLOW_ACTION_DECAP), error))
				return -rte_errno;
			action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TTL:
			if (flow_dv_convert_action_modify_ttl
					(mhdr_res, actions, items, &flow_attr,
					 dev_flow, !!(action_flags &
					 MLX5_FLOW_ACTION_DECAP), error))
				return -rte_errno;
			action_flags |= MLX5_FLOW_ACTION_SET_TTL;
			break;
		case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
		case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
			if (flow_dv_convert_action_modify_tcp_seq
					(mhdr_res, actions, error))
				return -rte_errno;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
					MLX5_FLOW_ACTION_INC_TCP_SEQ :
					MLX5_FLOW_ACTION_DEC_TCP_SEQ;
			break;
		case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
		case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
			if (flow_dv_convert_action_modify_tcp_ack
					(mhdr_res, actions, error))
				return -rte_errno;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
					MLX5_FLOW_ACTION_INC_TCP_ACK :
					MLX5_FLOW_ACTION_DEC_TCP_ACK;
			break;
		case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
			if (flow_dv_convert_action_set_reg
					(mhdr_res, actions, error))
				return -rte_errno;
			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
			break;
		case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
			if (flow_dv_convert_action_copy_mreg
					(dev, mhdr_res, actions, error))
				return -rte_errno;
			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
			break;
		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
			dev_flow->handle->fate_action =
					MLX5_FLOW_FATE_DEFAULT_MISS;
			break;
		case RTE_FLOW_ACTION_TYPE_METER:
			if (!wks->fm)
				return rte_flow_error_set(error, rte_errno,
					RTE_FLOW_ERROR_TYPE_ACTION,
					NULL, "Failed to get meter in flow.");
			/* Set the meter action. */
			dev_flow->dv.actions[actions_n++] =
				wks->fm->meter_action;
			action_flags |= MLX5_FLOW_ACTION_METER;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
			if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
							      actions, error))
				return -rte_errno;
			action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
			if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
							      actions, error))
				return -rte_errno;
			action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
			break;
		case RTE_FLOW_ACTION_TYPE_SAMPLE:
			sample_act_pos = actions_n;
			sample = (const struct rte_flow_action_sample *)
				 action->conf;
			actions_n++;
			action_flags |= MLX5_FLOW_ACTION_SAMPLE;
			/* Put encap action into the group if working with port ID. */
			if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
			    (action_flags & MLX5_FLOW_ACTION_PORT_ID))
				sample_act->action_flags |=
							MLX5_FLOW_ACTION_ENCAP;
			break;
		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
			if (flow_dv_convert_action_modify_field
					(dev, mhdr_res, actions, attr, error))
				return -rte_errno;
			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
			break;
		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
			owner_idx = (uint32_t)(uintptr_t)action->conf;
			ct = flow_aso_ct_get_by_idx(dev, owner_idx);
			if (!ct)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL,
						"Failed to get CT object.");
			if (mlx5_aso_ct_available(priv->sh, ct))
				return rte_flow_error_set(error, rte_errno,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL,
						"CT is unavailable.");
			if (ct->is_original)
				dev_flow->dv.actions[actions_n] =
							ct->dr_action_orig;
			else
				dev_flow->dv.actions[actions_n] =
							ct->dr_action_rply;
			if (flow->ct == 0) {
				flow->indirect_type =
						MLX5_INDIRECT_ACTION_TYPE_CT;
				flow->ct = owner_idx;
				__atomic_fetch_add(&ct->refcnt, 1,
						   __ATOMIC_RELAXED);
			}
			actions_n++;
			action_flags |= MLX5_FLOW_ACTION_CT;
			break;
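		/*
		 * The CT object carries two DR actions, one per direction;
		 * ct->is_original selects between the original-direction and
		 * reply-direction action for this flow.
		 */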
		case RTE_FLOW_ACTION_TYPE_END:
			actions_end = true;
			if (mhdr_res->actions_num) {
				/* Create modify header action if needed. */
				if (flow_dv_modify_hdr_resource_register
					(dev, mhdr_res, dev_flow, error))
					return -rte_errno;
				dev_flow->dv.actions[modify_action_position] =
					handle->dvh.modify_hdr->action;
			}
			/*
			 * Handle AGE and COUNT action by single HW counter
			 * when they are not shared.
			 */
			if (action_flags & MLX5_FLOW_ACTION_AGE) {
				if ((non_shared_age && count) ||
				    !(priv->sh->flow_hit_aso_en &&
				      (attr->group || attr->transfer))) {
					/* Create AGE by counters. */
					cnt_act = flow_dv_prepare_counter
								(dev, dev_flow,
								 flow, count,
								 non_shared_age,
								 error);
					if (!cnt_act)
						return -rte_errno;
					dev_flow->dv.actions[age_act_pos] =
								cnt_act->action;
					break;
				}
				if (!flow->age && non_shared_age) {
					flow->age = flow_dv_aso_age_alloc
								(dev, error);
					if (!flow->age)
						return -rte_errno;
					flow_dv_aso_age_params_init
						    (dev, flow->age,
						     non_shared_age->context ?
						     non_shared_age->context :
						     (void *)(uintptr_t)
						     (dev_flow->flow_idx),
						     non_shared_age->timeout);
				}
				age_act = flow_aso_age_get_by_idx(dev,
								  flow->age);
				dev_flow->dv.actions[age_act_pos] =
							     age_act->dr_action;
			}
			if (action_flags & MLX5_FLOW_ACTION_COUNT) {
				/*
				 * Create one count action, to be used
				 * by all sub-flows.
				 */
				cnt_act = flow_dv_prepare_counter(dev, dev_flow,
								  flow, count,
								  NULL, error);
				if (!cnt_act)
					return -rte_errno;
				dev_flow->dv.actions[actions_n++] =
								cnt_act->action;
			}
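			/* No break: fall through to the default case. */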
		default:
			break;
		}
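		/*
		 * The first action that needs a modify header reserves one
		 * slot in dev_flow->dv.actions[]; the actual modify header
		 * action object is created and stored into that slot only at
		 * RTE_FLOW_ACTION_TYPE_END, once all commands are collected.
		 */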
		if (mhdr_res->actions_num &&
		    modify_action_position == UINT32_MAX)
			modify_action_position = actions_n++;
	}
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int item_type = items->type;

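		/*
		 * next_protocol tracks the last L3 next-header value seen, so
		 * that mlx5_flow_tunnel_ip_check() can recognize IP-in-IP
		 * tunnels; it is reset to 0xff whenever no explicit protocol
		 * mask is given for the current layer.
		 */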
		if (!mlx5_flow_os_item_supported(item_type))
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_PORT_ID:
			flow_dv_translate_item_port_id
				(dev, match_mask, match_value, items, attr);
			last_item = MLX5_FLOW_ITEM_PORT_ID;
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_dv_translate_item_eth(match_mask, match_value,
						   items, tunnel,
						   dev_flow->dv.group);
			matcher.priority = action_flags &
					MLX5_FLOW_ACTION_DEFAULT_MISS &&
					!dev_flow->external ?
					MLX5_PRIORITY_MAP_L3 :
					MLX5_PRIORITY_MAP_L2;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			flow_dv_translate_item_vlan(dev_flow,
						    match_mask, match_value,
						    items, tunnel,
						    dev_flow->dv.group);
			matcher.priority = MLX5_PRIORITY_MAP_L2;
			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
					      MLX5_FLOW_LAYER_INNER_VLAN) :
					     (MLX5_FLOW_LAYER_OUTER_L2 |
					      MLX5_FLOW_LAYER_OUTER_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			mlx5_flow_tunnel_ip_check(items, next_protocol,
						  &item_flags, &tunnel);
			flow_dv_translate_item_ipv4(match_mask, match_value,
						    items, tunnel,
						    dev_flow->dv.group);
			matcher.priority = MLX5_PRIORITY_MAP_L3;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			mlx5_flow_tunnel_ip_check(items, next_protocol,
						  &item_flags, &tunnel);
			flow_dv_translate_item_ipv6(match_mask, match_value,
						    items, tunnel,
						    dev_flow->dv.group);
			matcher.priority = MLX5_PRIORITY_MAP_L3;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
			flow_dv_translate_item_ipv6_frag_ext(match_mask,
							     match_value,
							     items, tunnel);
			last_item = tunnel ?
					MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
					MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6_frag_ext *)
			     items->mask)->hdr.next_header) {
				next_protocol =
				((const struct rte_flow_item_ipv6_frag_ext *)
				 items->spec)->hdr.next_header;
				next_protocol &=
				((const struct rte_flow_item_ipv6_frag_ext *)
				 items->mask)->hdr.next_header;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			flow_dv_translate_item_tcp(match_mask, match_value,
						   items, tunnel);
			matcher.priority = MLX5_PRIORITY_MAP_L4;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			flow_dv_translate_item_udp(match_mask, match_value,
						   items, tunnel);
			matcher.priority = MLX5_PRIORITY_MAP_L4;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			last_item = MLX5_FLOW_LAYER_GRE;
			tunnel_item = items;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
			flow_dv_translate_item_gre_key(match_mask,
						       match_value, items);
			last_item = MLX5_FLOW_LAYER_GRE_KEY;
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			last_item = MLX5_FLOW_LAYER_GRE;
			tunnel_item = items;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			flow_dv_translate_item_vxlan(dev, attr,
						     match_mask, match_value,
						     items, tunnel);
			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			tunnel_item = items;
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE:
			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			last_item = MLX5_FLOW_LAYER_GENEVE;
			tunnel_item = items;
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
			ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
							  match_value,
							  items, error);
			if (ret)
				return rte_flow_error_set(error, -ret,
					RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					"cannot create GENEVE TLV option");
			flow->geneve_tlv_option = 1;
			last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			flow_dv_translate_item_mpls(match_mask, match_value,
						    items, last_item, tunnel);
			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_MARK:
			flow_dv_translate_item_mark(dev, match_mask,
						    match_value, items);
			last_item = MLX5_FLOW_ITEM_MARK;
			break;
		case RTE_FLOW_ITEM_TYPE_META:
			flow_dv_translate_item_meta(dev, match_mask,
						    match_value, attr, items);
			last_item = MLX5_FLOW_ITEM_METADATA;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
			flow_dv_translate_item_icmp(match_mask, match_value,
						    items, tunnel);
			last_item = MLX5_FLOW_LAYER_ICMP;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			flow_dv_translate_item_icmp6(match_mask, match_value,
						      items, tunnel);
			last_item = MLX5_FLOW_LAYER_ICMP6;
			break;
		case RTE_FLOW_ITEM_TYPE_TAG:
			flow_dv_translate_item_tag(dev, match_mask,
						   match_value, items);
			last_item = MLX5_FLOW_ITEM_TAG;
			break;
		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
			flow_dv_translate_mlx5_item_tag(dev, match_mask,
							match_value, items);
			last_item = MLX5_FLOW_ITEM_TAG;
			break;
		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
			flow_dv_translate_item_tx_queue(dev, match_mask,
							match_value,
							items);
			last_item = MLX5_FLOW_ITEM_TX_QUEUE;
			break;
		case RTE_FLOW_ITEM_TYPE_GTP:
			flow_dv_translate_item_gtp(match_mask, match_value,
						   items, tunnel);
			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			last_item = MLX5_FLOW_LAYER_GTP;
			break;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			ret = flow_dv_translate_item_gtp_psc(match_mask,
							  match_value,
							  items);
			if (ret)
				return rte_flow_error_set(error, -ret,
					RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					"cannot create GTP PSC item");
			last_item = MLX5_FLOW_LAYER_GTP_PSC;
			break;
		case RTE_FLOW_ITEM_TYPE_ECPRI:
			if (!mlx5_flex_parser_ecpri_exist(dev)) {
				/* Create it only the first time it is used. */
				ret = mlx5_flex_parser_ecpri_alloc(dev);
				if (ret)
					return rte_flow_error_set
						(error, -ret,
						RTE_FLOW_ERROR_TYPE_ITEM,
						NULL,
						"cannot create eCPRI parser");
			}
			flow_dv_translate_item_ecpri(dev, match_mask,
						     match_value, items,
						     last_item);
			/* No other protocol should follow the eCPRI layer. */
			last_item = MLX5_FLOW_LAYER_ECPRI;
			break;
		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
			flow_dv_translate_item_integrity(items, integrity_items,
							 &last_item);
			break;
		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
			flow_dv_translate_item_aso_ct(dev, match_mask,
						      match_value, items);
			break;
		case RTE_FLOW_ITEM_TYPE_FLEX:
			flow_dv_translate_item_flex(dev, match_mask,
						    match_value, items,
						    dev_flow, tunnel != 0);
			last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
				    MLX5_FLOW_ITEM_OUTER_FLEX;
			break;
		default:
			break;
		}
		item_flags |= last_item;
	}
	/*
	 * When E-Switch mode is enabled, there are two cases where the source
	 * port must be set manually: a NIC steering rule, and an E-Switch
	 * rule with no port_id item. In both cases the source port is set
	 * according to the current port in use.
	 */
	if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) && priv->sh->esw_mode) {
		if (flow_dv_translate_item_port_id(dev, match_mask,
						   match_value, NULL, attr))
			return -rte_errno;
	}
	if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
		flow_dv_translate_item_integrity_post(match_mask, match_value,
						      integrity_items,
						      item_flags);
	}
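	/*
	 * VXLAN-GPE, GENEVE, GRE and NVGRE matching was deferred in the item
	 * loop above (only tunnel_item was recorded) because their
	 * translation depends on the complete item_flags, i.e. on which
	 * inner layers follow the tunnel header.
	 */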
	if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
		flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
						 tunnel_item, item_flags);
	else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
		flow_dv_translate_item_geneve(match_mask, match_value,
					      tunnel_item, item_flags);
	else if (item_flags & MLX5_FLOW_LAYER_GRE) {
		if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
			flow_dv_translate_item_gre(match_mask, match_value,
						   tunnel_item, item_flags);
		else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
			flow_dv_translate_item_nvgre(match_mask, match_value,
						     tunnel_item, item_flags);
		else
			MLX5_ASSERT(false);
	}
#ifdef RTE_LIBRTE_MLX5_DEBUG
	MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
					      dev_flow->dv.value.buf));
#endif
	/*
	 * Layers may be already initialized from prefix flow if this dev_flow
	 * is the suffix flow.
	 */
	handle->layers |= item_flags;
	if (action_flags & MLX5_FLOW_ACTION_RSS)
		flow_dv_hashfields_set(dev_flow, rss_desc);
	/*
	 * If the sample action contains an RSS action, the sample/mirror
	 * resource should be registered only after the hash fields are
	 * updated.
	 */
	if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
		ret = flow_dv_translate_action_sample(dev,
						      sample,
						      dev_flow, attr,
						      &num_of_dest,
						      sample_actions,
						      &sample_res,
						      error);
		if (ret < 0)
			return ret;
		ret = flow_dv_create_action_sample(dev,
						   dev_flow,
						   num_of_dest,
						   &sample_res,
						   &mdest_res,
						   sample_actions,
						   action_flags,
						   error);
		if (ret < 0)
			return rte_flow_error_set
						(error, rte_errno,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL,
						"cannot create sample action");
		if (num_of_dest > 1) {
			dev_flow->dv.actions[sample_act_pos] =
			dev_flow->dv.dest_array_res->action;
		} else {
			dev_flow->dv.actions[sample_act_pos] =
			dev_flow->dv.sample_res->verbs_action;
		}
	}
	/*
	 * For multiple destinations (sample action with ratio = 1), the encap
	 * action and the port ID action are combined into a group action, so
	 * the original actions must be removed from the flow and only the
	 * sample action used instead.
	 */
	if (num_of_dest > 1 &&
	    (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
		int i;
		void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};

		for (i = 0; i < actions_n; i++) {
			if ((sample_act->dr_encap_action &&
				sample_act->dr_encap_action ==
				dev_flow->dv.actions[i]) ||
				(sample_act->dr_port_id_action &&
				sample_act->dr_port_id_action ==
				dev_flow->dv.actions[i]) ||
				(sample_act->dr_jump_action &&
				sample_act->dr_jump_action ==
				dev_flow->dv.actions[i]))
				continue;
			temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
		}
		memcpy((void *)dev_flow->dv.actions,
				(void *)temp_actions,
				tmp_actions_n * sizeof(void *));
		actions_n = tmp_actions_n;
	}
	dev_flow->dv.actions_n = actions_n;
	dev_flow->act_flags = action_flags;
	if (wks->skip_matcher_reg)
		return 0;
	/* Register matcher. */
	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
				    matcher.mask.size);
	matcher.priority = mlx5_get_matcher_priority(dev, attr,
						     matcher.priority,
						     dev_flow->external);
	/*
	 * When creating a meter drop flow in the drop table using the
	 * original 5-tuple match, the matcher priority should be lower than
	 * that of the mtr_id matcher.
	 */
	if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
	    dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
	    matcher.priority <= MLX5_REG_BITS)
		matcher.priority += MLX5_REG_BITS;
	/* The reserved field does not need to be set to 0 here. */
	tbl_key.is_fdb = attr->transfer;
	tbl_key.is_egress = attr->egress;
	tbl_key.level = dev_flow->dv.group;
	tbl_key.id = dev_flow->dv.table_id;
	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
				     tunnel, attr->group, error))
		return -rte_errno;
	return 0;
}

/**
 * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields).
 *
 * @param[in, out] action
 *   Shared RSS action holding hash RX queue objects.
 * @param[in] hash_fields
 *   Defines combination of packet fields to participate in RX hash.
 * @param[in] hrxq_idx
 *   Hash RX queue index to set.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
__flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
			      const uint64_t hash_fields,
			      uint32_t hrxq_idx)
{
	uint32_t *hrxqs = action->hrxq;

	switch (hash_fields & ~IBV_RX_HASH_INNER) {
	case MLX5_RSS_HASH_IPV4:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV4_DST_ONLY:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV4_SRC_ONLY:
		hrxqs[0] = hrxq_idx;
		return 0;
	case MLX5_RSS_HASH_IPV4_TCP:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
		hrxqs[1] = hrxq_idx;
		return 0;
	case MLX5_RSS_HASH_IPV4_UDP:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
		hrxqs[2] = hrxq_idx;
		return 0;
	case MLX5_RSS_HASH_IPV6:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV6_DST_ONLY:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV6_SRC_ONLY:
		hrxqs[3] = hrxq_idx;
		return 0;
	case MLX5_RSS_HASH_IPV6_TCP:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
		hrxqs[4] = hrxq_idx;
		return 0;
	case MLX5_RSS_HASH_IPV6_UDP:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
		hrxqs[5] = hrxq_idx;
		return 0;
	case MLX5_RSS_HASH_NONE:
		hrxqs[6] = hrxq_idx;
		return 0;
	default:
		return -1;
	}
}

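/*
 * Slot mapping shared by __flow_dv_action_rss_hrxq_set() above and
 * __flow_dv_action_rss_hrxq_lookup() below (index into the hrxq[] array):
 *   0 - IPv4, 1 - IPv4/TCP, 2 - IPv4/UDP,
 *   3 - IPv6, 4 - IPv6/TCP, 5 - IPv6/UDP, 6 - no hash.
 * Illustrative use (hypothetical caller, not part of this file):
 *   __flow_dv_action_rss_hrxq_set(shared_rss, MLX5_RSS_HASH_IPV4_TCP,
 *                                 hrxq_idx); // stores the index in hrxq[1]
 */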
/**
 * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   Shared RSS action ID holding hash RX queue objects.
 * @param[in] hash_fields
 *   Defines combination of packet fields to participate in RX hash.
 *
 * @return
 *   Valid hash RX queue index, otherwise 0.
 */
static uint32_t
__flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
				 const uint64_t hash_fields)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss =
	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
	const uint32_t *hrxqs = shared_rss->hrxq;

	switch (hash_fields & ~IBV_RX_HASH_INNER) {
	case MLX5_RSS_HASH_IPV4:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV4_DST_ONLY:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV4_SRC_ONLY:
		return hrxqs[0];
	case MLX5_RSS_HASH_IPV4_TCP:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
		return hrxqs[1];
	case MLX5_RSS_HASH_IPV4_UDP:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
		return hrxqs[2];
	case MLX5_RSS_HASH_IPV6:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV6_DST_ONLY:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV6_SRC_ONLY:
		return hrxqs[3];
	case MLX5_RSS_HASH_IPV6_TCP:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
		return hrxqs[4];
	case MLX5_RSS_HASH_IPV6_UDP:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
		return hrxqs[5];
	case MLX5_RSS_HASH_NONE:
		return hrxqs[6];
	default:
		return 0;
	}
}

13887 /**
13888  * Apply the flow to the NIC, lock free,
13889  * (mutex should be acquired by caller).
13890  *
13891  * @param[in] dev
13892  *   Pointer to the Ethernet device structure.
13893  * @param[in, out] flow
13894  *   Pointer to flow structure.
13895  * @param[out] error
13896  *   Pointer to error structure.
13897  *
13898  * @return
13899  *   0 on success, a negative errno value otherwise and rte_errno is set.
13900  */
13901 static int
13902 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13903               struct rte_flow_error *error)
13904 {
13905         struct mlx5_flow_dv_workspace *dv;
13906         struct mlx5_flow_handle *dh;
13907         struct mlx5_flow_handle_dv *dv_h;
13908         struct mlx5_flow *dev_flow;
13909         struct mlx5_priv *priv = dev->data->dev_private;
13910         uint32_t handle_idx;
13911         int n;
13912         int err;
13913         int idx;
13914         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13915         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13916         uint8_t misc_mask;
13917
13918         MLX5_ASSERT(wks);
13919         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13920                 dev_flow = &wks->flows[idx];
13921                 dv = &dev_flow->dv;
13922                 dh = dev_flow->handle;
13923                 dv_h = &dh->dvh;
13924                 n = dv->actions_n;
13925                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13926                         if (dv->transfer) {
13927                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13928                                 dv->actions[n++] = priv->sh->dr_drop_action;
13929                         } else {
13930 #ifdef HAVE_MLX5DV_DR
13931                                 /* DR supports drop action placeholder. */
13932                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13933                                 dv->actions[n++] = dv->group ?
13934                                         priv->sh->dr_drop_action :
13935                                         priv->root_drop_action;
13936 #else
13937                                 /* For DV we use the explicit drop queue. */
13938                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13939                                 dv->actions[n++] =
13940                                                 priv->drop_queue.hrxq->action;
13941 #endif
13942                         }
13943                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13944                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
13945                         struct mlx5_hrxq *hrxq;
13946                         uint32_t hrxq_idx;
13947
13948                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13949                                                     &hrxq_idx);
13950                         if (!hrxq) {
13951                                 rte_flow_error_set
13952                                         (error, rte_errno,
13953                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13954                                          "cannot get hash queue");
13955                                 goto error;
13956                         }
13957                         dh->rix_hrxq = hrxq_idx;
13958                         dv->actions[n++] = hrxq->action;
13959                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13960                         struct mlx5_hrxq *hrxq = NULL;
13961                         uint32_t hrxq_idx;
13962
13963                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13964                                                 rss_desc->shared_rss,
13965                                                 dev_flow->hash_fields);
13966                         if (hrxq_idx)
13967                                 hrxq = mlx5_ipool_get
13968                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13969                                          hrxq_idx);
13970                         if (!hrxq) {
13971                                 rte_flow_error_set
13972                                         (error, rte_errno,
13973                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13974                                          "cannot get hash queue");
13975                                 goto error;
13976                         }
13977                         dh->rix_srss = rss_desc->shared_rss;
13978                         dv->actions[n++] = hrxq->action;
13979                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13980                         if (!priv->sh->default_miss_action) {
13981                                 rte_flow_error_set
13982                                         (error, rte_errno,
13983                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13984                                          "default miss action not be created.");
13985                                 goto error;
13986                         }
13987                         dv->actions[n++] = priv->sh->default_miss_action;
13988                 }
13989                 misc_mask = flow_dv_matcher_enable(dv->value.buf);
13990                 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
13991                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13992                                                (void *)&dv->value, n,
13993                                                dv->actions, &dh->drv_flow);
13994                 if (err) {
13995                         rte_flow_error_set
13996                                 (error, errno,
13997                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13998                                 NULL,
13999                                 (!priv->sh->config.allow_duplicate_pattern &&
14000                                 errno == EEXIST) ?
14001                                 "duplicating pattern is not allowed" :
14002                                 "hardware refuses to create flow");
14003                         goto error;
14004                 }
14005                 if (priv->vmwa_context &&
14006                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
14007                         /*
14008                          * The rule contains the VLAN pattern.
14009                          * For VF we are going to create VLAN
14010                          * interface to make hypervisor set correct
14011                          * e-Switch vport context.
14012                          */
14013                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
14014                 }
14015         }
14016         return 0;
14017 error:
14018         err = rte_errno; /* Save rte_errno before cleanup. */
14019         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
14020                        handle_idx, dh, next) {
14021                 /* hrxq is union, don't clear it if the flag is not set. */
14022                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
14023                         mlx5_hrxq_release(dev, dh->rix_hrxq);
14024                         dh->rix_hrxq = 0;
14025                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
14026                         dh->rix_srss = 0;
14027                 }
14028                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14029                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14030         }
14031         rte_errno = err; /* Restore rte_errno. */
14032         return -rte_errno;
14033 }
14034
14035 void
14036 flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
14037                           struct mlx5_list_entry *entry)
14038 {
14039         struct mlx5_flow_dv_matcher *resource = container_of(entry,
14040                                                              typeof(*resource),
14041                                                              entry);
14042
14043         claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
14044         mlx5_free(resource);
14045 }
14046
14047 /**
14048  * Release the flow matcher.
14049  *
14050  * @param dev
14051  *   Pointer to Ethernet device.
14052  * @param port_id
14053  *   Index to port ID action resource.
14054  *
14055  * @return
14056  *   1 while a reference on it exists, 0 when freed.
14057  */
14058 static int
14059 flow_dv_matcher_release(struct rte_eth_dev *dev,
14060                         struct mlx5_flow_handle *handle)
14061 {
14062         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
14063         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
14064                                                             typeof(*tbl), tbl);
14065         int ret;
14066
14067         MLX5_ASSERT(matcher->matcher_object);
14068         ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
14069         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
14070         return ret;
14071 }
14072
14073 void
14074 flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14075 {
14076         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14077         struct mlx5_flow_dv_encap_decap_resource *res =
14078                                        container_of(entry, typeof(*res), entry);
14079
14080         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14081         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
14082 }
14083
14084 /**
14085  * Release an encap/decap resource.
14086  *
14087  * @param dev
14088  *   Pointer to Ethernet device.
14089  * @param encap_decap_idx
14090  *   Index of encap decap resource.
14091  *
14092  * @return
14093  *   1 while a reference on it exists, 0 when freed.
14094  */
14095 static int
14096 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
14097                                      uint32_t encap_decap_idx)
14098 {
14099         struct mlx5_priv *priv = dev->data->dev_private;
14100         struct mlx5_flow_dv_encap_decap_resource *resource;
14101
14102         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
14103                                   encap_decap_idx);
14104         if (!resource)
14105                 return 0;
14106         MLX5_ASSERT(resource->action);
14107         return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
14108 }
14109
14110 /**
14111  * Release an jump to table action resource.
14112  *
14113  * @param dev
14114  *   Pointer to Ethernet device.
14115  * @param rix_jump
14116  *   Index to the jump action resource.
14117  *
14118  * @return
14119  *   1 while a reference on it exists, 0 when freed.
14120  */
14121 static int
14122 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
14123                                   uint32_t rix_jump)
14124 {
14125         struct mlx5_priv *priv = dev->data->dev_private;
14126         struct mlx5_flow_tbl_data_entry *tbl_data;
14127
14128         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
14129                                   rix_jump);
14130         if (!tbl_data)
14131                 return 0;
14132         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
14133 }
14134
14135 void
14136 flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14137 {
14138         struct mlx5_flow_dv_modify_hdr_resource *res =
14139                 container_of(entry, typeof(*res), entry);
14140         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14141
14142         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14143         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
14144 }
14145
14146 /**
14147  * Release a modify-header resource.
14148  *
14149  * @param dev
14150  *   Pointer to Ethernet device.
14151  * @param handle
14152  *   Pointer to mlx5_flow_handle.
14153  *
14154  * @return
14155  *   1 while a reference on it exists, 0 when freed.
14156  */
14157 static int
14158 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
14159                                     struct mlx5_flow_handle *handle)
14160 {
14161         struct mlx5_priv *priv = dev->data->dev_private;
14162         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
14163
14164         MLX5_ASSERT(entry->action);
14165         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
14166 }
14167
14168 void
14169 flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14170 {
14171         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14172         struct mlx5_flow_dv_port_id_action_resource *resource =
14173                                   container_of(entry, typeof(*resource), entry);
14174
14175         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14176         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
14177 }
14178
14179 /**
14180  * Release port ID action resource.
14181  *
14182  * @param dev
14183  *   Pointer to Ethernet device.
14184  * @param handle
14185  *   Pointer to mlx5_flow_handle.
14186  *
14187  * @return
14188  *   1 while a reference on it exists, 0 when freed.
14189  */
14190 static int
14191 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
14192                                         uint32_t port_id)
14193 {
14194         struct mlx5_priv *priv = dev->data->dev_private;
14195         struct mlx5_flow_dv_port_id_action_resource *resource;
14196
14197         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
14198         if (!resource)
14199                 return 0;
14200         MLX5_ASSERT(resource->action);
14201         return mlx5_list_unregister(priv->sh->port_id_action_list,
14202                                     &resource->entry);
14203 }
14204
14205 /**
14206  * Release shared RSS action resource.
14207  *
14208  * @param dev
14209  *   Pointer to Ethernet device.
14210  * @param srss
14211  *   Shared RSS action index.
14212  */
14213 static void
14214 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
14215 {
14216         struct mlx5_priv *priv = dev->data->dev_private;
14217         struct mlx5_shared_action_rss *shared_rss;
14218
14219         shared_rss = mlx5_ipool_get
14220                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
14221         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14222 }
14223
14224 void
14225 flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14226 {
14227         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14228         struct mlx5_flow_dv_push_vlan_action_resource *resource =
14229                         container_of(entry, typeof(*resource), entry);
14230
14231         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14232         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
14233 }
14234
14235 /**
14236  * Release push vlan action resource.
14237  *
14238  * @param dev
14239  *   Pointer to Ethernet device.
14240  * @param handle
14241  *   Pointer to mlx5_flow_handle.
14242  *
14243  * @return
14244  *   1 while a reference on it exists, 0 when freed.
14245  */
14246 static int
14247 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
14248                                           struct mlx5_flow_handle *handle)
14249 {
14250         struct mlx5_priv *priv = dev->data->dev_private;
14251         struct mlx5_flow_dv_push_vlan_action_resource *resource;
14252         uint32_t idx = handle->dvh.rix_push_vlan;
14253
14254         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
14255         if (!resource)
14256                 return 0;
14257         MLX5_ASSERT(resource->action);
14258         return mlx5_list_unregister(priv->sh->push_vlan_action_list,
14259                                     &resource->entry);
14260 }
14261
14262 /**
14263  * Release the fate resource.
14264  *
14265  * @param dev
14266  *   Pointer to Ethernet device.
14267  * @param handle
14268  *   Pointer to mlx5_flow_handle.
14269  */
14270 static void
14271 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
14272                                struct mlx5_flow_handle *handle)
14273 {
14274         if (!handle->rix_fate)
14275                 return;
14276         switch (handle->fate_action) {
14277         case MLX5_FLOW_FATE_QUEUE:
14278                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
14279                         mlx5_hrxq_release(dev, handle->rix_hrxq);
14280                 break;
14281         case MLX5_FLOW_FATE_JUMP:
14282                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
14283                 break;
14284         case MLX5_FLOW_FATE_PORT_ID:
14285                 flow_dv_port_id_action_resource_release(dev,
14286                                 handle->rix_port_id_action);
14287                 break;
14288         default:
14289                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
14290                 break;
14291         }
14292         handle->rix_fate = 0;
14293 }
14294
14295 void
14296 flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
14297                          struct mlx5_list_entry *entry)
14298 {
14299         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
14300                                                               typeof(*resource),
14301                                                               entry);
14302         struct rte_eth_dev *dev = resource->dev;
14303         struct mlx5_priv *priv = dev->data->dev_private;
14304
14305         if (resource->verbs_action)
14306                 claim_zero(mlx5_flow_os_destroy_flow_action
14307                                                       (resource->verbs_action));
14308         if (resource->normal_path_tbl)
14309                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14310                                              resource->normal_path_tbl);
14311         flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
14312         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
14313         DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
14314 }
14315
14316 /**
14317  * Release an sample resource.
14318  *
14319  * @param dev
14320  *   Pointer to Ethernet device.
14321  * @param handle
14322  *   Pointer to mlx5_flow_handle.
14323  *
14324  * @return
14325  *   1 while a reference on it exists, 0 when freed.
14326  */
14327 static int
14328 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
14329                                      struct mlx5_flow_handle *handle)
14330 {
14331         struct mlx5_priv *priv = dev->data->dev_private;
14332         struct mlx5_flow_dv_sample_resource *resource;
14333
14334         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
14335                                   handle->dvh.rix_sample);
14336         if (!resource)
14337                 return 0;
14338         MLX5_ASSERT(resource->verbs_action);
14339         return mlx5_list_unregister(priv->sh->sample_action_list,
14340                                     &resource->entry);
14341 }
14342
14343 void
14344 flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
14345                              struct mlx5_list_entry *entry)
14346 {
14347         struct mlx5_flow_dv_dest_array_resource *resource =
14348                         container_of(entry, typeof(*resource), entry);
14349         struct rte_eth_dev *dev = resource->dev;
14350         struct mlx5_priv *priv = dev->data->dev_private;
14351         uint32_t i = 0;
14352
14353         MLX5_ASSERT(resource->action);
14354         if (resource->action)
14355                 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14356         for (; i < resource->num_of_dest; i++)
14357                 flow_dv_sample_sub_actions_release(dev,
14358                                                    &resource->sample_idx[i]);
14359         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
14360         DRV_LOG(DEBUG, "destination array resource %p: removed",
14361                 (void *)resource);
14362 }
14363
14364 /**
14365  * Release an destination array resource.
14366  *
14367  * @param dev
14368  *   Pointer to Ethernet device.
14369  * @param handle
14370  *   Pointer to mlx5_flow_handle.
14371  *
14372  * @return
14373  *   1 while a reference on it exists, 0 when freed.
14374  */
14375 static int
14376 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14377                                     struct mlx5_flow_handle *handle)
14378 {
14379         struct mlx5_priv *priv = dev->data->dev_private;
14380         struct mlx5_flow_dv_dest_array_resource *resource;
14381
14382         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14383                                   handle->dvh.rix_dest_array);
14384         if (!resource)
14385                 return 0;
14386         MLX5_ASSERT(resource->action);
14387         return mlx5_list_unregister(priv->sh->dest_array_list,
14388                                     &resource->entry);
14389 }
14390
14391 static void
14392 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
14393 {
14394         struct mlx5_priv *priv = dev->data->dev_private;
14395         struct mlx5_dev_ctx_shared *sh = priv->sh;
14396         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
14397                                 sh->geneve_tlv_option_resource;
14398         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
14399         if (geneve_opt_resource) {
14400                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
14401                                          __ATOMIC_RELAXED))) {
14402                         claim_zero(mlx5_devx_cmd_destroy
14403                                         (geneve_opt_resource->obj));
14404                         mlx5_free(sh->geneve_tlv_option_resource);
14405                         sh->geneve_tlv_option_resource = NULL;
14406                 }
14407         }
14408         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
14409 }
14410
14411 /**
14412  * Remove the flow from the NIC but keeps it in memory.
14413  * Lock free, (mutex should be acquired by caller).
14414  *
14415  * @param[in] dev
14416  *   Pointer to Ethernet device.
14417  * @param[in, out] flow
14418  *   Pointer to flow structure.
14419  */
14420 static void
14421 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14422 {
14423         struct mlx5_flow_handle *dh;
14424         uint32_t handle_idx;
14425         struct mlx5_priv *priv = dev->data->dev_private;
14426
14427         if (!flow)
14428                 return;
14429         handle_idx = flow->dev_handles;
14430         while (handle_idx) {
14431                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14432                                     handle_idx);
14433                 if (!dh)
14434                         return;
14435                 if (dh->drv_flow) {
14436                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14437                         dh->drv_flow = NULL;
14438                 }
14439                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14440                         flow_dv_fate_resource_release(dev, dh);
14441                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14442                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14443                 handle_idx = dh->next.next;
14444         }
14445 }
14446
14447 /**
14448  * Remove the flow from the NIC and the memory.
14449  * Lock free, (mutex should be acquired by caller).
14450  *
14451  * @param[in] dev
14452  *   Pointer to the Ethernet device structure.
14453  * @param[in, out] flow
14454  *   Pointer to flow structure.
14455  */
14456 static void
14457 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
14458 {
14459         struct mlx5_flow_handle *dev_handle;
14460         struct mlx5_priv *priv = dev->data->dev_private;
14461         struct mlx5_flow_meter_info *fm = NULL;
14462         uint32_t srss = 0;
14463
14464         if (!flow)
14465                 return;
14466         flow_dv_remove(dev, flow);
14467         if (flow->counter) {
14468                 flow_dv_counter_free(dev, flow->counter);
14469                 flow->counter = 0;
14470         }
14471         if (flow->meter) {
14472                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
14473                 if (fm)
14474                         mlx5_flow_meter_detach(priv, fm);
14475                 flow->meter = 0;
14476         }
14477         /* Keep the current age handling by default. */
14478         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
14479                 flow_dv_aso_ct_release(dev, flow->ct, NULL);
14480         else if (flow->age)
14481                 flow_dv_aso_age_release(dev, flow->age);
14482         if (flow->geneve_tlv_option) {
14483                 flow_dv_geneve_tlv_option_resource_release(dev);
14484                 flow->geneve_tlv_option = 0;
14485         }
14486         while (flow->dev_handles) {
14487                 uint32_t tmp_idx = flow->dev_handles;
14488
14489                 dev_handle = mlx5_ipool_get(priv->sh->ipool
14490                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
14491                 if (!dev_handle)
14492                         return;
14493                 flow->dev_handles = dev_handle->next.next;
14494                 while (dev_handle->flex_item) {
14495                         int index = rte_bsf32(dev_handle->flex_item);
14496
14497                         mlx5_flex_release_index(dev, index);
14498                         dev_handle->flex_item &= ~RTE_BIT32(index);
14499                 }
14500                 if (dev_handle->dvh.matcher)
14501                         flow_dv_matcher_release(dev, dev_handle);
14502                 if (dev_handle->dvh.rix_sample)
14503                         flow_dv_sample_resource_release(dev, dev_handle);
14504                 if (dev_handle->dvh.rix_dest_array)
14505                         flow_dv_dest_array_resource_release(dev, dev_handle);
14506                 if (dev_handle->dvh.rix_encap_decap)
14507                         flow_dv_encap_decap_resource_release(dev,
14508                                 dev_handle->dvh.rix_encap_decap);
14509                 if (dev_handle->dvh.modify_hdr)
14510                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
14511                 if (dev_handle->dvh.rix_push_vlan)
14512                         flow_dv_push_vlan_action_resource_release(dev,
14513                                                                   dev_handle);
14514                 if (dev_handle->dvh.rix_tag)
14515                         flow_dv_tag_release(dev,
14516                                             dev_handle->dvh.rix_tag);
14517                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
14518                         flow_dv_fate_resource_release(dev, dev_handle);
14519                 else if (!srss)
14520                         srss = dev_handle->rix_srss;
14521                 if (fm && dev_handle->is_meter_flow_id &&
14522                     dev_handle->split_flow_id)
14523                         mlx5_ipool_free(fm->flow_ipool,
14524                                         dev_handle->split_flow_id);
14525                 else if (dev_handle->split_flow_id &&
14526                     !dev_handle->is_meter_flow_id)
14527                         mlx5_ipool_free(priv->sh->ipool
14528                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
14529                                         dev_handle->split_flow_id);
14530                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14531                            tmp_idx);
14532         }
14533         if (srss)
14534                 flow_dv_shared_rss_action_release(dev, srss);
14535 }
14536
14537 /**
14538  * Release array of hash RX queue objects.
14539  * Helper function.
14540  *
14541  * @param[in] dev
14542  *   Pointer to the Ethernet device structure.
14543  * @param[in, out] hrxqs
14544  *   Array of hash RX queue objects.
14545  *
14546  * @return
14547  *   Total number of references to hash RX queue objects in *hrxqs* array
14548  *   after this operation.
14549  */
14550 static int
14551 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14552                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14553 {
14554         size_t i;
14555         int remaining = 0;
14556
14557         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14558                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14559
14560                 if (!ret)
14561                         (*hrxqs)[i] = 0;
14562                 remaining += ret;
14563         }
14564         return remaining;
14565 }
14566
14567 /**
14568  * Release all hash RX queue objects representing shared RSS action.
14569  *
14570  * @param[in] dev
14571  *   Pointer to the Ethernet device structure.
14572  * @param[in, out] action
14573  *   Shared RSS action to remove hash RX queue objects from.
14574  *
14575  * @return
14576  *   Total number of references to hash RX queue objects stored in *action*
14577  *   after this operation.
14578  *   Expected to be 0 if no external references held.
14579  */
14580 static int
14581 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
14582                                  struct mlx5_shared_action_rss *shared_rss)
14583 {
14584         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
14585 }
14586
14587 /**
14588  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
14589  * user input.
14590  *
14591  * Only one hash value is available for one L3+L4 combination:
14592  * for example:
14593  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
14594  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share
14595  * same slot in mlx5_rss_hash_fields.
14596  *
14597  * @param[in] rss
14598  *   Pointer to the shared action RSS conf.
14599  * @param[in, out] hash_field
14600  *   hash_field variable needed to be adjusted.
14601  *
14602  * @return
14603  *   void
14604  */
14605 static void
14606 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
14607                                      uint64_t *hash_field)
14608 {
14609         uint64_t rss_types = rss->origin.types;
14610
14611         switch (*hash_field & ~IBV_RX_HASH_INNER) {
14612         case MLX5_RSS_HASH_IPV4:
14613                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
14614                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
14615                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14616                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
14617                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14618                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
14619                         else
14620                                 *hash_field |= MLX5_RSS_HASH_IPV4;
14621                 }
14622                 return;
14623         case MLX5_RSS_HASH_IPV6:
14624                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
14625                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
14626                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14627                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
14628                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14629                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
14630                         else
14631                                 *hash_field |= MLX5_RSS_HASH_IPV6;
14632                 }
14633                 return;
14634         case MLX5_RSS_HASH_IPV4_UDP:
14635                 /* fall-through. */
14636         case MLX5_RSS_HASH_IPV6_UDP:
14637                 if (rss_types & RTE_ETH_RSS_UDP) {
14638                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
14639                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14640                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
14641                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14642                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
14643                         else
14644                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14645                 }
14646                 return;
14647         case MLX5_RSS_HASH_IPV4_TCP:
14648                 /* fall-through. */
14649         case MLX5_RSS_HASH_IPV6_TCP:
14650                 if (rss_types & RTE_ETH_RSS_TCP) {
14651                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14652                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14653                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14654                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14655                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14656                         else
14657                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14658                 }
14659                 return;
14660         default:
14661                 return;
14662         }
14663 }
14664
14665 /**
14666  * Setup shared RSS action.
14667  * Prepare set of hash RX queue objects sufficient to handle all valid
14668  * hash_fields combinations (see enum ibv_rx_hash_fields).
14669  *
14670  * @param[in] dev
14671  *   Pointer to the Ethernet device structure.
14672  * @param[in] action_idx
14673  *   Shared RSS action ipool index.
14674  * @param[in, out] action
14675  *   Partially initialized shared RSS action.
14676  * @param[out] error
14677  *   Perform verbose error reporting if not NULL. Initialized in case of
14678  *   error only.
14679  *
14680  * @return
14681  *   0 on success, otherwise negative errno value.
14682  */
14683 static int
14684 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14685                            uint32_t action_idx,
14686                            struct mlx5_shared_action_rss *shared_rss,
14687                            struct rte_flow_error *error)
14688 {
14689         struct mlx5_flow_rss_desc rss_desc = { 0 };
14690         size_t i;
14691         int err;
14692
14693         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl,
14694                                      !!dev->data->dev_started)) {
14695                 return rte_flow_error_set(error, rte_errno,
14696                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14697                                           "cannot setup indirection table");
14698         }
14699         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14700         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14701         rss_desc.const_q = shared_rss->origin.queue;
14702         rss_desc.queue_num = shared_rss->origin.queue_num;
14703         /* Set non-zero value to indicate a shared RSS. */
14704         rss_desc.shared_rss = action_idx;
14705         rss_desc.ind_tbl = shared_rss->ind_tbl;
14706         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14707                 uint32_t hrxq_idx;
14708                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14709                 int tunnel = 0;
14710
14711                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14712                 if (shared_rss->origin.level > 1) {
14713                         hash_fields |= IBV_RX_HASH_INNER;
14714                         tunnel = 1;
14715                 }
14716                 rss_desc.tunnel = tunnel;
14717                 rss_desc.hash_fields = hash_fields;
14718                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14719                 if (!hrxq_idx) {
14720                         rte_flow_error_set
14721                                 (error, rte_errno,
14722                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14723                                  "cannot get hash queue");
14724                         goto error_hrxq_new;
14725                 }
14726                 err = __flow_dv_action_rss_hrxq_set
14727                         (shared_rss, hash_fields, hrxq_idx);
14728                 MLX5_ASSERT(!err);
14729         }
14730         return 0;
14731 error_hrxq_new:
14732         err = rte_errno;
14733         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14734         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true))
14735                 shared_rss->ind_tbl = NULL;
14736         rte_errno = err;
14737         return -rte_errno;
14738 }
14739
14740 /**
14741  * Create shared RSS action.
14742  *
14743  * @param[in] dev
14744  *   Pointer to the Ethernet device structure.
14745  * @param[in] conf
14746  *   Shared action configuration.
14747  * @param[in] rss
14748  *   RSS action specification used to create shared action.
14749  * @param[out] error
14750  *   Perform verbose error reporting if not NULL. Initialized in case of
14751  *   error only.
14752  *
14753  * @return
14754  *   A valid shared action ID in case of success, 0 otherwise and
14755  *   rte_errno is set.
14756  */
14757 static uint32_t
14758 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14759                             const struct rte_flow_indir_action_conf *conf,
14760                             const struct rte_flow_action_rss *rss,
14761                             struct rte_flow_error *error)
14762 {
14763         struct mlx5_priv *priv = dev->data->dev_private;
14764         struct mlx5_shared_action_rss *shared_rss = NULL;
14765         void *queue = NULL;
14766         struct rte_flow_action_rss *origin;
14767         const uint8_t *rss_key;
14768         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14769         uint32_t idx;
14770
14771         RTE_SET_USED(conf);
14772         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14773                             0, SOCKET_ID_ANY);
14774         shared_rss = mlx5_ipool_zmalloc
14775                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14776         if (!shared_rss || !queue) {
14777                 rte_flow_error_set(error, ENOMEM,
14778                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14779                                    "cannot allocate resource memory");
14780                 goto error_rss_init;
14781         }
14782         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14783                 rte_flow_error_set(error, E2BIG,
14784                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14785                                    "rss action number out of range");
14786                 goto error_rss_init;
14787         }
14788         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14789                                           sizeof(*shared_rss->ind_tbl),
14790                                           0, SOCKET_ID_ANY);
14791         if (!shared_rss->ind_tbl) {
14792                 rte_flow_error_set(error, ENOMEM,
14793                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14794                                    "cannot allocate resource memory");
14795                 goto error_rss_init;
14796         }
14797         memcpy(queue, rss->queue, queue_size);
14798         shared_rss->ind_tbl->queues = queue;
14799         shared_rss->ind_tbl->queues_n = rss->queue_num;
14800         origin = &shared_rss->origin;
14801         origin->func = rss->func;
14802         origin->level = rss->level;
14803         /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
14804         origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
14805         /* NULL RSS key indicates default RSS key. */
14806         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14807         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14808         origin->key = &shared_rss->key[0];
14809         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14810         origin->queue = queue;
14811         origin->queue_num = rss->queue_num;
14812         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14813                 goto error_rss_init;
14814         rte_spinlock_init(&shared_rss->action_rss_sl);
14815         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14816         rte_spinlock_lock(&priv->shared_act_sl);
14817         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14818                      &priv->rss_shared_actions, idx, shared_rss, next);
14819         rte_spinlock_unlock(&priv->shared_act_sl);
14820         return idx;
14821 error_rss_init:
14822         if (shared_rss) {
14823                 if (shared_rss->ind_tbl)
14824                         mlx5_free(shared_rss->ind_tbl);
14825                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14826                                 idx);
14827         }
14828         if (queue)
14829                 mlx5_free(queue);
14830         return 0;
14831 }
14832
14833 /**
14834  * Destroy the shared RSS action.
14835  * Release related hash RX queue objects.
14836  *
14837  * @param[in] dev
14838  *   Pointer to the Ethernet device structure.
14839  * @param[in] idx
14840  *   The shared RSS action object ID to be removed.
14841  * @param[out] error
14842  *   Perform verbose error reporting if not NULL. Initialized in case of
14843  *   error only.
14844  *
14845  * @return
14846  *   0 on success, otherwise negative errno value.
14847  */
14848 static int
14849 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14850                              struct rte_flow_error *error)
14851 {
14852         struct mlx5_priv *priv = dev->data->dev_private;
14853         struct mlx5_shared_action_rss *shared_rss =
14854             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14855         uint32_t old_refcnt = 1;
14856         int remaining;
14857         uint16_t *queue = NULL;
14858
14859         if (!shared_rss)
14860                 return rte_flow_error_set(error, EINVAL,
14861                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14862                                           "invalid shared action");
14863         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14864                                          0, 0, __ATOMIC_ACQUIRE,
14865                                          __ATOMIC_RELAXED))
14866                 return rte_flow_error_set(error, EBUSY,
14867                                           RTE_FLOW_ERROR_TYPE_ACTION,
14868                                           NULL,
14869                                           "shared rss has references");
14870         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14871         if (remaining)
14872                 return rte_flow_error_set(error, EBUSY,
14873                                           RTE_FLOW_ERROR_TYPE_ACTION,
14874                                           NULL,
14875                                           "shared rss hrxq has references");
14876         queue = shared_rss->ind_tbl->queues;
14877         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true,
14878                                                !!dev->data->dev_started);
14879         if (remaining)
14880                 return rte_flow_error_set(error, EBUSY,
14881                                           RTE_FLOW_ERROR_TYPE_ACTION,
14882                                           NULL,
14883                                           "shared rss indirection table has"
14884                                           " references");
14885         mlx5_free(queue);
14886         rte_spinlock_lock(&priv->shared_act_sl);
14887         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14888                      &priv->rss_shared_actions, idx, shared_rss, next);
14889         rte_spinlock_unlock(&priv->shared_act_sl);
14890         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14891                         idx);
14892         return 0;
14893 }
14894
14895 /**
14896  * Create indirect action, lock free,
14897  * (mutex should be acquired by caller).
14898  * Dispatcher for action type specific call.
14899  *
14900  * @param[in] dev
14901  *   Pointer to the Ethernet device structure.
14902  * @param[in] conf
14903  *   Shared action configuration.
14904  * @param[in] action
14905  *   Action specification used to create indirect action.
14906  * @param[out] error
14907  *   Perform verbose error reporting if not NULL. Initialized in case of
14908  *   error only.
14909  *
14910  * @return
14911  *   A valid shared action handle in case of success, NULL otherwise and
14912  *   rte_errno is set.
14913  */
14914 static struct rte_flow_action_handle *
14915 flow_dv_action_create(struct rte_eth_dev *dev,
14916                       const struct rte_flow_indir_action_conf *conf,
14917                       const struct rte_flow_action *action,
14918                       struct rte_flow_error *err)
14919 {
14920         struct mlx5_priv *priv = dev->data->dev_private;
14921         uint32_t age_idx = 0;
14922         uint32_t idx = 0;
14923         uint32_t ret = 0;
14924
14925         switch (action->type) {
14926         case RTE_FLOW_ACTION_TYPE_RSS:
14927                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14928                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14929                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14930                 break;
14931         case RTE_FLOW_ACTION_TYPE_AGE:
14932                 age_idx = flow_dv_aso_age_alloc(dev, err);
14933                 if (!age_idx) {
14934                         ret = -rte_errno;
14935                         break;
14936                 }
14937                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14938                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14939                 flow_dv_aso_age_params_init(dev, age_idx,
14940                                         ((const struct rte_flow_action_age *)
14941                                                 action->conf)->context ?
14942                                         ((const struct rte_flow_action_age *)
14943                                                 action->conf)->context :
14944                                         (void *)(uintptr_t)idx,
14945                                         ((const struct rte_flow_action_age *)
14946                                                 action->conf)->timeout);
14947                 ret = age_idx;
14948                 break;
14949         case RTE_FLOW_ACTION_TYPE_COUNT:
14950                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14951                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14952                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14953                 break;
14954         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14955                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14956                                                          err);
14957                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14958                 break;
14959         default:
14960                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14961                                    NULL, "action type not supported");
14962                 break;
14963         }
14964         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14965 }
14966
14967 /**
14968  * Destroy the indirect action.
14969  * Release action related resources on the NIC and the memory.
14970  * Lock free, (mutex should be acquired by caller).
14971  * Dispatcher for action type specific call.
14972  *
14973  * @param[in] dev
14974  *   Pointer to the Ethernet device structure.
14975  * @param[in] handle
14976  *   The indirect action object handle to be removed.
14977  * @param[out] error
14978  *   Perform verbose error reporting if not NULL. Initialized in case of
14979  *   error only.
14980  *
14981  * @return
14982  *   0 on success, otherwise negative errno value.
14983  */
14984 static int
14985 flow_dv_action_destroy(struct rte_eth_dev *dev,
14986                        struct rte_flow_action_handle *handle,
14987                        struct rte_flow_error *error)
14988 {
14989         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14990         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14991         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14992         struct mlx5_flow_counter *cnt;
14993         uint32_t no_flow_refcnt = 1;
14994         int ret;
14995
14996         switch (type) {
14997         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14998                 return __flow_dv_action_rss_release(dev, idx, error);
14999         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15000                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
15001                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
15002                                                  &no_flow_refcnt, 1, false,
15003                                                  __ATOMIC_ACQUIRE,
15004                                                  __ATOMIC_RELAXED))
15005                         return rte_flow_error_set(error, EBUSY,
15006                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15007                                                   NULL,
15008                                                   "Indirect count action has references");
15009                 flow_dv_counter_free(dev, idx);
15010                 return 0;
15011         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15012                 ret = flow_dv_aso_age_release(dev, idx);
15013                 if (ret)
15014                         /*
15015                          * In this case, the last flow has a reference will
15016                          * actually release the age action.
15017                          */
15018                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
15019                                 " released with references %d.", idx, ret);
15020                 return 0;
15021         case MLX5_INDIRECT_ACTION_TYPE_CT:
15022                 ret = flow_dv_aso_ct_release(dev, idx, error);
15023                 if (ret < 0)
15024                         return ret;
15025                 if (ret > 0)
15026                         DRV_LOG(DEBUG, "Connection tracking object %u still "
15027                                 "has references %d.", idx, ret);
15028                 return 0;
15029         default:
15030                 return rte_flow_error_set(error, ENOTSUP,
15031                                           RTE_FLOW_ERROR_TYPE_ACTION,
15032                                           NULL,
15033                                           "action type not supported");
15034         }
15035 }
15036
15037 /**
15038  * Updates in place shared RSS action configuration.
15039  *
15040  * @param[in] dev
15041  *   Pointer to the Ethernet device structure.
15042  * @param[in] idx
15043  *   The shared RSS action object ID to be updated.
15044  * @param[in] action_conf
15045  *   RSS action specification used to modify *shared_rss*.
15046  * @param[out] error
15047  *   Perform verbose error reporting if not NULL. Initialized in case of
15048  *   error only.
15049  *
15050  * @return
15051  *   0 on success, otherwise negative errno value.
15052  * @note: currently only support update of RSS queues.
15053  */
15054 static int
15055 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
15056                             const struct rte_flow_action_rss *action_conf,
15057                             struct rte_flow_error *error)
15058 {
15059         struct mlx5_priv *priv = dev->data->dev_private;
15060         struct mlx5_shared_action_rss *shared_rss =
15061             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
15062         int ret = 0;
15063         void *queue = NULL;
15064         uint16_t *queue_old = NULL;
15065         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
15066         bool dev_started = !!dev->data->dev_started;
15067
15068         if (!shared_rss)
15069                 return rte_flow_error_set(error, EINVAL,
15070                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15071                                           "invalid shared action to update");
15072         if (priv->obj_ops.ind_table_modify == NULL)
15073                 return rte_flow_error_set(error, ENOTSUP,
15074                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15075                                           "cannot modify indirection table");
15076         queue = mlx5_malloc(MLX5_MEM_ZERO,
15077                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
15078                             0, SOCKET_ID_ANY);
15079         if (!queue)
15080                 return rte_flow_error_set(error, ENOMEM,
15081                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15082                                           NULL,
15083                                           "cannot allocate resource memory");
15084         memcpy(queue, action_conf->queue, queue_size);
15085         MLX5_ASSERT(shared_rss->ind_tbl);
15086         rte_spinlock_lock(&shared_rss->action_rss_sl);
15087         queue_old = shared_rss->ind_tbl->queues;
15088         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
15089                                         queue, action_conf->queue_num,
15090                                         true /* standalone */,
15091                                         dev_started /* ref_new_qs */,
15092                                         dev_started /* deref_old_qs */);
15093         if (ret) {
15094                 mlx5_free(queue);
15095                 ret = rte_flow_error_set(error, rte_errno,
15096                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15097                                           "cannot update indirection table");
15098         } else {
15099                 mlx5_free(queue_old);
15100                 shared_rss->origin.queue = queue;
15101                 shared_rss->origin.queue_num = action_conf->queue_num;
15102         }
15103         rte_spinlock_unlock(&shared_rss->action_rss_sl);
15104         return ret;
15105 }
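
/*
 * Editor's illustrative sketch (not part of the driver): how an update
 * request reaches __flow_dv_action_rss_update() above. The *update*
 * argument of rte_flow_action_handle_update() is passed as a complete
 * RSS action whose conf pointer is dereferenced by flow_dv_action_update();
 * only the queue set is expected to change. The queue list, port_id and
 * handle values are hypothetical.
 */
static __rte_unused int
example_update_shared_rss_queues(uint16_t port_id,
                                 struct rte_flow_action_handle *handle)
{
        static const uint16_t new_queues[] = { 0, 1, 2, 3 };
        struct rte_flow_action_rss rss_conf = {
                .queue = new_queues,
                .queue_num = RTE_DIM(new_queues),
        };
        struct rte_flow_action update = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &rss_conf,
        };
        struct rte_flow_error error;

        return rte_flow_action_handle_update(port_id, handle, &update,
                                             &error);
}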
15106
15107 /**
15108  * Updates the conntrack context or direction in place.
15109  * Context updates must be synchronized.
15110  *
15111  * @param[in] dev
15112  *   Pointer to the Ethernet device structure.
15113  * @param[in] idx
15114  *   The conntrack object ID to be updated.
15115  * @param[in] update
15116  *   Pointer to the structure of information to update.
15117  * @param[out] error
15118  *   Perform verbose error reporting if not NULL. Initialized in case of
15119  *   error only.
15120  *
15121  * @return
15122  *   0 on success, otherwise negative errno value.
15123  */
15124 static int
15125 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
15126                            const struct rte_flow_modify_conntrack *update,
15127                            struct rte_flow_error *error)
15128 {
15129         struct mlx5_priv *priv = dev->data->dev_private;
15130         struct mlx5_aso_ct_action *ct;
15131         const struct rte_flow_action_conntrack *new_prf;
15132         int ret = 0;
15133         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15134         uint32_t dev_idx;
15135
15136         if (PORT_ID(priv) != owner)
15137                 return rte_flow_error_set(error, EACCES,
15138                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15139                                           NULL,
15140                                           "CT object owned by another port");
15141         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15142         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15143         if (!ct->refcnt)
15144                 return rte_flow_error_set(error, ENOMEM,
15145                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15146                                           NULL,
15147                                           "CT object is inactive");
15148         new_prf = &update->new_ct;
15149         if (update->direction)
15150                 ct->is_original = !!new_prf->is_original_dir;
15151         if (update->state) {
15152                 /* Only validate the profile when it needs to be updated. */
15153                 ret = mlx5_validate_action_ct(dev, new_prf, error);
15154                 if (ret)
15155                         return ret;
15156                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
15157                 if (ret)
15158                         return rte_flow_error_set(error, EIO,
15159                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15160                                         NULL,
15161                                         "Failed to send CT context update WQE");
15162                 /* Block until ready or a failure. */
15163                 ret = mlx5_aso_ct_available(priv->sh, ct);
15164                 if (ret)
15165                         rte_flow_error_set(error, rte_errno,
15166                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15167                                            NULL,
15168                                            "Timed out waiting for the CT update");
15169         }
15170         return ret;
15171 }
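
/*
 * Editor's illustrative sketch (not part of the driver): toggling only
 * the conntrack direction. With update->state left at 0,
 * __flow_dv_action_ct_update() skips the profile validation and the ASO
 * WQE, and just rewrites ct->is_original. Names and values are for
 * illustration only.
 */
static __rte_unused int
example_ct_set_direction(uint16_t port_id,
                         struct rte_flow_action_handle *handle,
                         bool original_dir)
{
        struct rte_flow_modify_conntrack update = {
                .new_ct = { .is_original_dir = original_dir },
                .direction = 1, /* Update the direction bit only. */
                .state = 0,     /* Keep the offloaded context as-is. */
        };
        struct rte_flow_error error;

        return rte_flow_action_handle_update(port_id, handle, &update,
                                             &error);
}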
15172
15173 /**
15174  * Updates the shared action configuration in place, lock free
15175  * (mutex should be acquired by the caller).
15176  *
15177  * @param[in] dev
15178  *   Pointer to the Ethernet device structure.
15179  * @param[in] handle
15180  *   The indirect action object handle to be updated.
15181  * @param[in] update
15182  *   Action specification used to modify the action pointed to by *handle*.
15183  *   *update* could be of the same type as the action pointed to by the
15184  *   *handle* argument, or some other structure such as a wrapper,
15185  *   depending on the indirect action type.
15186  * @param[out] err
15187  *   Perform verbose error reporting if not NULL. Initialized in case of
15188  *   error only.
15189  *
15190  * @return
15191  *   0 on success, otherwise negative errno value.
15192  */
15193 static int
15194 flow_dv_action_update(struct rte_eth_dev *dev,
15195                         struct rte_flow_action_handle *handle,
15196                         const void *update,
15197                         struct rte_flow_error *err)
15198 {
15199         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15200         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15201         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15202         const void *action_conf;
15203
15204         switch (type) {
15205         case MLX5_INDIRECT_ACTION_TYPE_RSS:
15206                 action_conf = ((const struct rte_flow_action *)update)->conf;
15207                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
15208         case MLX5_INDIRECT_ACTION_TYPE_CT:
15209                 return __flow_dv_action_ct_update(dev, idx, update, err);
15210         default:
15211                 return rte_flow_error_set(err, ENOTSUP,
15212                                           RTE_FLOW_ERROR_TYPE_ACTION,
15213                                           NULL,
15214                                           "action type update not supported");
15215         }
15216 }
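
/*
 * Editor's note with an illustrative sketch (not part of the driver):
 * the indirect action handle is not a pointer to memory; the handle
 * value itself encodes the object type in the bits at and above
 * MLX5_INDIRECT_ACTION_TYPE_OFFSET and the per-type object index in
 * the bits below, so flow_dv_action_update() dispatches without any
 * lookup.
 */
static __rte_unused void
example_decode_indirect_handle(const struct rte_flow_action_handle *handle,
                               uint32_t *type, uint32_t *idx)
{
        uint32_t act_idx = (uint32_t)(uintptr_t)handle;

        *type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
        *idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
}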
15217
15218 /**
15219  * Destroy the meter sub policy table rules.
15220  * Lock free (mutex should be acquired by the caller).
15221  *
15222  * @param[in] dev
15223  *   Pointer to Ethernet device.
15224  * @param[in] sub_policy
15225  *   Pointer to meter sub policy table.
15226  */
15227 static void
15228 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
15229                              struct mlx5_flow_meter_sub_policy *sub_policy)
15230 {
15231         struct mlx5_priv *priv = dev->data->dev_private;
15232         struct mlx5_flow_tbl_data_entry *tbl;
15233         struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
15234         struct mlx5_flow_meter_info *next_fm;
15235         struct mlx5_sub_policy_color_rule *color_rule;
15236         void *tmp;
15237         uint32_t i;
15238
15239         for (i = 0; i < RTE_COLORS; i++) {
15240                 next_fm = NULL;
15241                 if (i == RTE_COLOR_GREEN && policy &&
15242                     policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
15243                         next_fm = mlx5_flow_meter_find(priv,
15244                                         policy->act_cnt[i].next_mtr_id, NULL);
15245                 RTE_TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
15246                                    next_port, tmp) {
15247                         claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
15248                         tbl = container_of(color_rule->matcher->tbl,
15249                                            typeof(*tbl), tbl);
15250                         mlx5_list_unregister(tbl->matchers,
15251                                              &color_rule->matcher->entry);
15252                         TAILQ_REMOVE(&sub_policy->color_rules[i],
15253                                      color_rule, next_port);
15254                         mlx5_free(color_rule);
15255                         if (next_fm)
15256                                 mlx5_flow_meter_detach(priv, next_fm);
15257                 }
15258         }
15259         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15260                 if (sub_policy->rix_hrxq[i]) {
15261                         if (policy && !policy->is_hierarchy)
15262                                 mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
15263                         sub_policy->rix_hrxq[i] = 0;
15264                 }
15265                 if (sub_policy->jump_tbl[i]) {
15266                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15267                                                      sub_policy->jump_tbl[i]);
15268                         sub_policy->jump_tbl[i] = NULL;
15269                 }
15270         }
15271         if (sub_policy->tbl_rsc) {
15272                 flow_dv_tbl_resource_release(MLX5_SH(dev),
15273                                              sub_policy->tbl_rsc);
15274                 sub_policy->tbl_rsc = NULL;
15275         }
15276 }
15277
15278 /**
15279  * Destroy policy rules, lock free
15280  * (mutex should be acquired by the caller).
15281  * Iterates over all sub-policy tables of every meter domain.
15282  *
15283  * @param[in] dev
15284  *   Pointer to the Ethernet device structure.
15285  * @param[in] mtr_policy
15286  *   Meter policy struct.
15287  */
15288 static void
15289 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
15290                              struct mlx5_flow_meter_policy *mtr_policy)
15291 {
15292         uint32_t i, j;
15293         struct mlx5_flow_meter_sub_policy *sub_policy;
15294         uint16_t sub_policy_num;
15295
15296         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15297                 sub_policy_num = (mtr_policy->sub_policy_num >>
15298                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15299                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15300                 for (j = 0; j < sub_policy_num; j++) {
15301                         sub_policy = mtr_policy->sub_policys[i][j];
15302                         if (sub_policy)
15303                                 __flow_dv_destroy_sub_policy_rules(dev,
15304                                                                    sub_policy);
15305                 }
15306         }
15307 }
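
/*
 * Editor's illustrative sketch (not part of the driver):
 * mtr_policy->sub_policy_num packs one small counter per meter domain
 * (ingress/egress/transfer) into a single word, each field
 * MLX5_MTR_SUB_POLICY_NUM_SHIFT bits wide. The loop above unpacks it
 * per domain exactly as this helper does.
 */
static __rte_unused uint16_t
example_sub_policy_num(const struct mlx5_flow_meter_policy *mtr_policy,
                       enum mlx5_meter_domain domain)
{
        return (mtr_policy->sub_policy_num >>
                (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
               MLX5_MTR_SUB_POLICY_NUM_MASK;
}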
15308
15309 /**
15310  * Destroy policy actions, lock free
15311  * (mutex should be acquired by the caller).
15312  * Dispatcher for action type specific calls.
15313  *
15314  * @param[in] dev
15315  *   Pointer to the Ethernet device structure.
15316  * @param[in] mtr_policy
15317  *   Meter policy struct.
15318  */
15319 static void
15320 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
15321                       struct mlx5_flow_meter_policy *mtr_policy)
15322 {
15323         struct rte_flow_action *rss_action;
15324         struct mlx5_flow_handle dev_handle;
15325         uint32_t i, j;
15326
15327         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15328                 if (mtr_policy->act_cnt[i].rix_mark) {
15329                         flow_dv_tag_release(dev,
15330                                 mtr_policy->act_cnt[i].rix_mark);
15331                         mtr_policy->act_cnt[i].rix_mark = 0;
15332                 }
15333                 if (mtr_policy->act_cnt[i].modify_hdr) {
15334                         dev_handle.dvh.modify_hdr =
15335                                 mtr_policy->act_cnt[i].modify_hdr;
15336                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
15337                 }
15338                 switch (mtr_policy->act_cnt[i].fate_action) {
15339                 case MLX5_FLOW_FATE_SHARED_RSS:
15340                         rss_action = mtr_policy->act_cnt[i].rss;
15341                         mlx5_free(rss_action);
15342                         break;
15343                 case MLX5_FLOW_FATE_PORT_ID:
15344                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
15345                                 flow_dv_port_id_action_resource_release(dev,
15346                                 mtr_policy->act_cnt[i].rix_port_id_action);
15347                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
15348                         }
15349                         break;
15350                 case MLX5_FLOW_FATE_DROP:
15351                 case MLX5_FLOW_FATE_JUMP:
15352                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15353                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
15354                                                 NULL;
15355                         break;
15356                 default:
15357                         /* Queue action: nothing to do. */
15358                         break;
15359                 }
15360         }
15361         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15362                 mtr_policy->dr_drop_action[j] = NULL;
15363 }
15364
15365 /**
15366  * Create policy actions per domain, lock free
15367  * (mutex should be acquired by the caller).
15368  * Dispatcher for action type specific calls.
15369  *
15370  * @param[in] dev
15371  *   Pointer to the Ethernet device structure.
15372  * @param[in] mtr_policy
15373  *   Meter policy struct.
15374  * @param[in] actions
15375  *   Per color action specifications used to create meter actions.
15376  * @param[out] error
15377  *   Perform verbose error reporting if not NULL. Initialized in case of
15378  *   error only.
15379  *
15380  * @return
15381  *   0 on success, otherwise negative errno value.
15382  */
15383 static int
15384 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15385                         struct mlx5_flow_meter_policy *mtr_policy,
15386                         const struct rte_flow_action *actions[RTE_COLORS],
15387                         enum mlx5_meter_domain domain,
15388                         struct rte_mtr_error *error)
15389 {
15390         struct mlx5_priv *priv = dev->data->dev_private;
15391         struct rte_flow_error flow_err;
15392         const struct rte_flow_action *act;
15393         uint64_t action_flags;
15394         struct mlx5_flow_handle dh;
15395         struct mlx5_flow dev_flow;
15396         struct mlx5_flow_dv_port_id_action_resource port_id_action;
15397         int i, ret;
15398         uint8_t egress, transfer;
15399         struct mlx5_meter_policy_action_container *act_cnt = NULL;
15400         union {
15401                 struct mlx5_flow_dv_modify_hdr_resource res;
15402                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15403                             sizeof(struct mlx5_modification_cmd) *
15404                             (MLX5_MAX_MODIFY_NUM + 1)];
15405         } mhdr_dummy;
15406         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15407         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
15408
15409         MLX5_ASSERT(wks);
15410         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15411         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15412         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15413         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15414         memset(&port_id_action, 0,
15415                sizeof(struct mlx5_flow_dv_port_id_action_resource));
15416         memset(mhdr_res, 0, sizeof(*mhdr_res));
15417         mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15418                                        (egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15419                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX);
15420         dev_flow.handle = &dh;
15421         dev_flow.dv.port_id_action = &port_id_action;
15422         dev_flow.external = true;
15423         for (i = 0; i < RTE_COLORS; i++) {
15424                 if (i < MLX5_MTR_RTE_COLORS)
15425                         act_cnt = &mtr_policy->act_cnt[i];
15426                 /* Skip policy action creation for colors marked to be skipped. */
15427                 if ((i == RTE_COLOR_YELLOW && mtr_policy->skip_y) ||
15428                     (i == RTE_COLOR_GREEN && mtr_policy->skip_g))
15429                         continue;
15430                 action_flags = 0;
15431                 for (act = actions[i];
15432                      act && act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
15433                         switch (act->type) {
15434                         case RTE_FLOW_ACTION_TYPE_MARK:
15435                         {
15436                                 uint32_t tag_be = mlx5_flow_mark_set
15437                                         (((const struct rte_flow_action_mark *)
15438                                         (act->conf))->id);
15439
15440                                 if (i >= MLX5_MTR_RTE_COLORS)
15441                                         return -rte_mtr_error_set(error,
15442                                           ENOTSUP,
15443                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15444                                           NULL,
15445                                           "cannot create policy "
15446                                           "mark action for this color");
15447                                 wks->mark = 1;
15448                                 if (flow_dv_tag_resource_register(dev, tag_be,
15449                                                   &dev_flow, &flow_err))
15450                                         return -rte_mtr_error_set(error,
15451                                         ENOTSUP,
15452                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15453                                         NULL,
15454                                         "cannot setup policy mark action");
15455                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
15456                                 act_cnt->rix_mark =
15457                                         dev_flow.handle->dvh.rix_tag;
15458                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15459                                 break;
15460                         }
15461                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15462                                 if (i >= MLX5_MTR_RTE_COLORS)
15463                                         return -rte_mtr_error_set(error,
15464                                           ENOTSUP,
15465                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15466                                           NULL,
15467                                           "cannot create policy "
15468                                           "set tag action for this color");
15469                                 if (flow_dv_convert_action_set_tag
15470                                 (dev, mhdr_res,
15471                                 (const struct rte_flow_action_set_tag *)
15472                                 act->conf,  &flow_err))
15473                                         return -rte_mtr_error_set(error,
15474                                         ENOTSUP,
15475                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15476                                         NULL, "cannot convert policy "
15477                                         "set tag action");
15478                                 if (!mhdr_res->actions_num)
15479                                         return -rte_mtr_error_set(error,
15480                                         ENOTSUP,
15481                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15482                                         NULL, "cannot find policy "
15483                                         "set tag action");
15484                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15485                                 break;
15486                         case RTE_FLOW_ACTION_TYPE_DROP:
15487                         {
15488                                 struct mlx5_flow_mtr_mng *mtrmng =
15489                                                 priv->sh->mtrmng;
15490                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15491
15492                                 /*
15493                                  * Create the drop table with
15494                                  * METER DROP level.
15495                                  */
15496                                 if (!mtrmng->drop_tbl[domain]) {
15497                                         mtrmng->drop_tbl[domain] =
15498                                         flow_dv_tbl_resource_get(dev,
15499                                         MLX5_FLOW_TABLE_LEVEL_METER,
15500                                         egress, transfer, false, NULL, 0,
15501                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15502                                         if (!mtrmng->drop_tbl[domain])
15503                                                 return -rte_mtr_error_set
15504                                         (error, ENOTSUP,
15505                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15506                                         NULL,
15507                                         "Failed to create meter drop table");
15508                                 }
15509                                 tbl_data = container_of
15510                                 (mtrmng->drop_tbl[domain],
15511                                 struct mlx5_flow_tbl_data_entry, tbl);
15512                                 if (i < MLX5_MTR_RTE_COLORS) {
15513                                         act_cnt->dr_jump_action[domain] =
15514                                                 tbl_data->jump.action;
15515                                         act_cnt->fate_action =
15516                                                 MLX5_FLOW_FATE_DROP;
15517                                 }
15518                                 if (i == RTE_COLOR_RED)
15519                                         mtr_policy->dr_drop_action[domain] =
15520                                                 tbl_data->jump.action;
15521                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15522                                 break;
15523                         }
15524                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15525                         {
15526                                 if (i >= MLX5_MTR_RTE_COLORS)
15527                                         return -rte_mtr_error_set(error,
15528                                         ENOTSUP,
15529                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15530                                         NULL, "cannot create policy "
15531                                         "fate queue for this color");
15532                                 act_cnt->queue =
15533                                 ((const struct rte_flow_action_queue *)
15534                                         (act->conf))->index;
15535                                 act_cnt->fate_action =
15536                                         MLX5_FLOW_FATE_QUEUE;
15537                                 dev_flow.handle->fate_action =
15538                                         MLX5_FLOW_FATE_QUEUE;
15539                                 mtr_policy->is_queue = 1;
15540                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15541                                 break;
15542                         }
15543                         case RTE_FLOW_ACTION_TYPE_RSS:
15544                         {
15545                                 int rss_size;
15546
15547                                 if (i >= MLX5_MTR_RTE_COLORS)
15548                                         return -rte_mtr_error_set(error,
15549                                           ENOTSUP,
15550                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15551                                           NULL,
15552                                           "cannot create policy "
15553                                           "rss action for this color");
15554                                 /*
15555                                  * Save RSS conf into policy struct
15556                                  * for translate stage.
15557                                  */
15558                                 rss_size = (int)rte_flow_conv
15559                                         (RTE_FLOW_CONV_OP_ACTION,
15560                                         NULL, 0, act, &flow_err);
15561                                 if (rss_size <= 0)
15562                                         return -rte_mtr_error_set(error,
15563                                           ENOTSUP,
15564                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15565                                           NULL, "Failed to get the "
15566                                           "rss action struct size");
15567                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
15568                                                 rss_size, 0, SOCKET_ID_ANY);
15569                                 if (!act_cnt->rss)
15570                                         return -rte_mtr_error_set(error,
15571                                           ENOTSUP,
15572                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15573                                           NULL,
15574                                           "Failed to allocate rss action memory");
15575                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
15576                                         act_cnt->rss, rss_size,
15577                                         act, &flow_err);
15578                                 if (ret < 0)
15579                                         return -rte_mtr_error_set(error,
15580                                           ENOTSUP,
15581                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15582                                           NULL, "Failed to save "
15583                                           "rss action into policy struct");
15584                                 act_cnt->fate_action =
15585                                         MLX5_FLOW_FATE_SHARED_RSS;
15586                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15587                                 break;
15588                         }
15589                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15590                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
15591                         {
15592                                 struct mlx5_flow_dv_port_id_action_resource
15593                                         port_id_resource;
15594                                 uint32_t port_id = 0;
15595
15596                                 if (i >= MLX5_MTR_RTE_COLORS)
15597                                         return -rte_mtr_error_set(error,
15598                                         ENOTSUP,
15599                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15600                                         NULL, "cannot create policy "
15601                                         "port action for this color");
15602                                 memset(&port_id_resource, 0,
15603                                         sizeof(port_id_resource));
15604                                 if (flow_dv_translate_action_port_id(dev, act,
15605                                                 &port_id, &flow_err))
15606                                         return -rte_mtr_error_set(error,
15607                                         ENOTSUP,
15608                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15609                                         NULL, "cannot translate "
15610                                         "policy port action");
15611                                 port_id_resource.port_id = port_id;
15612                                 if (flow_dv_port_id_action_resource_register
15613                                         (dev, &port_id_resource,
15614                                         &dev_flow, &flow_err))
15615                                         return -rte_mtr_error_set(error,
15616                                         ENOTSUP,
15617                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15618                                         NULL, "cannot setup "
15619                                         "policy port action");
15620                                 act_cnt->rix_port_id_action =
15621                                         dev_flow.handle->rix_port_id_action;
15622                                 act_cnt->fate_action =
15623                                         MLX5_FLOW_FATE_PORT_ID;
15624                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15625                                 break;
15626                         }
15627                         case RTE_FLOW_ACTION_TYPE_JUMP:
15628                         {
15629                                 uint32_t jump_group = 0;
15630                                 uint32_t table = 0;
15631                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15632                                 struct flow_grp_info grp_info = {
15633                                         .external = !!dev_flow.external,
15634                                         .transfer = !!transfer,
15635                                         .fdb_def_rule = !!priv->fdb_def_rule,
15636                                         .std_tbl_fix = 0,
15637                                         .skip_scale = dev_flow.skip_scale &
15638                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
15639                                 };
15640                                 struct mlx5_flow_meter_sub_policy *sub_policy =
15641                                         mtr_policy->sub_policys[domain][0];
15642
15643                                 if (i >= MLX5_MTR_RTE_COLORS)
15644                                         return -rte_mtr_error_set(error,
15645                                           ENOTSUP,
15646                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15647                                           NULL,
15648                                           "cannot create policy "
15649                                           "jump action for this color");
15650                                 jump_group =
15651                                 ((const struct rte_flow_action_jump *)
15652                                                         act->conf)->group;
15653                                 if (mlx5_flow_group_to_table(dev, NULL,
15654                                                        jump_group,
15655                                                        &table,
15656                                                        &grp_info, &flow_err))
15657                                         return -rte_mtr_error_set(error,
15658                                         ENOTSUP,
15659                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15660                                         NULL, "cannot setup "
15661                                         "policy jump action");
15662                                 sub_policy->jump_tbl[i] =
15663                                 flow_dv_tbl_resource_get(dev,
15664                                         table, egress,
15665                                         transfer,
15666                                         !!dev_flow.external,
15667                                         NULL, jump_group, 0,
15668                                         0, &flow_err);
15669                                 if (!sub_policy->jump_tbl[i])
15670                                         return -rte_mtr_error_set(error,
15671                                         ENOTSUP,
15672                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15673                                         NULL,
15674                                         "cannot create jump action.");
15675                                 tbl_data = container_of
15676                                 (sub_policy->jump_tbl[i],
15677                                 struct mlx5_flow_tbl_data_entry, tbl);
15678                                 act_cnt->dr_jump_action[domain] =
15679                                         tbl_data->jump.action;
15680                                 act_cnt->fate_action =
15681                                         MLX5_FLOW_FATE_JUMP;
15682                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15683                                 break;
15684                         }
15685                         /*
15686                          * No need to check meter hierarchy for Y or R colors
15687                          * here since it is done in the validation stage.
15688                          */
15689                         case RTE_FLOW_ACTION_TYPE_METER:
15690                         {
15691                                 const struct rte_flow_action_meter *mtr;
15692                                 struct mlx5_flow_meter_info *next_fm;
15693                                 struct mlx5_flow_meter_policy *next_policy;
15694                                 struct rte_flow_action tag_action;
15695                                 struct mlx5_rte_flow_action_set_tag set_tag;
15696                                 uint32_t next_mtr_idx = 0;
15697
15698                                 mtr = act->conf;
15699                                 next_fm = mlx5_flow_meter_find(priv,
15700                                                         mtr->mtr_id,
15701                                                         &next_mtr_idx);
15702                                 if (!next_fm)
15703                                         return -rte_mtr_error_set(error, EINVAL,
15704                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15705                                                 "Failed to find next meter.");
15706                                 if (next_fm->def_policy)
15707                                         return -rte_mtr_error_set(error, EINVAL,
15708                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15709                                 "Hierarchy only supports termination meter.");
15710                                 next_policy = mlx5_flow_meter_policy_find(dev,
15711                                                 next_fm->policy_id, NULL);
15712                                 MLX5_ASSERT(next_policy);
15713                                 if (next_fm->drop_cnt) {
15714                                         set_tag.id =
15715                                                 (enum modify_reg)
15716                                                 mlx5_flow_get_reg_id(dev,
15717                                                 MLX5_MTR_ID,
15718                                                 0,
15719                                                 (struct rte_flow_error *)error);
15720                                         set_tag.offset = (priv->mtr_reg_share ?
15721                                                 MLX5_MTR_COLOR_BITS : 0);
15722                                         set_tag.length = (priv->mtr_reg_share ?
15723                                                MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
15724                                                MLX5_REG_BITS);
15725                                         set_tag.data = next_mtr_idx;
15726                                         tag_action.type =
15727                                                 (enum rte_flow_action_type)
15728                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
15729                                         tag_action.conf = &set_tag;
15730                                         if (flow_dv_convert_action_set_reg
15731                                                 (mhdr_res, &tag_action,
15732                                                 (struct rte_flow_error *)error))
15733                                                 return -rte_errno;
15734                                         action_flags |=
15735                                                 MLX5_FLOW_ACTION_SET_TAG;
15736                                 }
15737                                 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
15738                                 act_cnt->next_mtr_id = next_fm->meter_id;
15739                                 act_cnt->next_sub_policy = NULL;
15740                                 mtr_policy->is_hierarchy = 1;
15741                                 mtr_policy->dev = next_policy->dev;
15742                                 action_flags |=
15743                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
15744                                 break;
15745                         }
15746                         default:
15747                                 return -rte_mtr_error_set(error, ENOTSUP,
15748                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15749                                           NULL, "action type not supported");
15750                         }
15751                         if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
15752                                 /* Create the modify header action if needed. */
15753                                 dev_flow.dv.group = 1;
15754                                 if (flow_dv_modify_hdr_resource_register
15755                                         (dev, mhdr_res, &dev_flow, &flow_err))
15756                                         return -rte_mtr_error_set(error,
15757                                                 ENOTSUP,
15758                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15759                                                 NULL, "cannot register policy "
15760                                                 "set tag action");
15761                                 act_cnt->modify_hdr =
15762                                         dev_flow.handle->dvh.modify_hdr;
15763                         }
15764                 }
15765         }
15766         return 0;
15767 }
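
/*
 * Editor's illustrative application-side sketch (not part of the
 * driver): a meter policy whose per-color actions are translated by
 * __flow_dv_create_domain_policy_acts() above. Green packets go to a
 * queue and red packets are dropped; the policy and queue identifiers
 * are hypothetical.
 */
static __rte_unused int
example_add_queue_drop_policy(uint16_t port_id, uint32_t policy_id)
{
        static const struct rte_flow_action_queue queue_conf = {
                .index = 0,
        };
        static const struct rte_flow_action green_acts[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        static const struct rte_flow_action red_acts[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_mtr_meter_policy_params policy = {
                .actions = {
                        [RTE_COLOR_GREEN] = green_acts,
                        [RTE_COLOR_YELLOW] = NULL,
                        [RTE_COLOR_RED] = red_acts,
                },
        };
        struct rte_mtr_error error;

        return rte_mtr_meter_policy_add(port_id, policy_id, &policy,
                                        &error);
}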
15768
15769 /**
15770  * Create the policy actions for all meter domains, lock free
15771  * (mutex should be acquired by the caller).
15772  * Invokes the per-domain creation routine for each configured domain.
15773  *
15774  * @param[in] dev
15775  *   Pointer to the Ethernet device structure.
15776  * @param[in] mtr_policy
15777  *   Meter policy struct.
15778  * @param[in] actions
15779  *   Per color action specifications used to create meter actions.
15780  * @param[out] error
15781  *   Perform verbose error reporting if not NULL. Initialized in case of
15782  *   error only.
15783  *
15784  * @return
15785  *   0 on success, otherwise negative errno value.
15786  */
15787 static int
15788 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15789                       struct mlx5_flow_meter_policy *mtr_policy,
15790                       const struct rte_flow_action *actions[RTE_COLORS],
15791                       struct rte_mtr_error *error)
15792 {
15793         int ret, i;
15794         uint16_t sub_policy_num;
15795
15796         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15797                 sub_policy_num = (mtr_policy->sub_policy_num >>
15798                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15799                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15800                 if (sub_policy_num) {
15801                         ret = __flow_dv_create_domain_policy_acts(dev,
15802                                 mtr_policy, actions,
15803                                 (enum mlx5_meter_domain)i, error);
15804                         /* Resource cleanup is done at the caller level. */
15805                         if (ret)
15806                                 return ret;
15807                 }
15808         }
15809         return 0;
15810 }
15811
15812 /**
15813  * Query a DV flow rule for its statistics via DevX.
15814  *
15815  * @param[in] dev
15816  *   Pointer to Ethernet device.
15817  * @param[in] cnt_idx
15818  *   Index to the flow counter.
15819  * @param[out] data
15820  *   Data retrieved by the query.
15821  * @param[out] error
15822  *   Perform verbose error reporting if not NULL.
15823  *
15824  * @return
15825  *   0 on success, a negative errno value otherwise and rte_errno is set.
15826  */
15827 int
15828 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15829                     struct rte_flow_error *error)
15830 {
15831         struct mlx5_priv *priv = dev->data->dev_private;
15832         struct rte_flow_query_count *qc = data;
15833
15834         if (!priv->sh->cdev->config.devx)
15835                 return rte_flow_error_set(error, ENOTSUP,
15836                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15837                                           NULL,
15838                                           "counters are not supported");
15839         if (cnt_idx) {
15840                 uint64_t pkts, bytes;
15841                 struct mlx5_flow_counter *cnt;
15842                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15843
15844                 if (err)
15845                         return rte_flow_error_set(error, -err,
15846                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15847                                         NULL, "cannot read counters");
15848                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15849                 qc->hits_set = 1;
15850                 qc->bytes_set = 1;
15851                 qc->hits = pkts - cnt->hits;
15852                 qc->bytes = bytes - cnt->bytes;
15853                 if (qc->reset) {
15854                         cnt->hits = pkts;
15855                         cnt->bytes = bytes;
15856                 }
15857                 return 0;
15858         }
15859         return rte_flow_error_set(error, EINVAL,
15860                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15861                                   NULL,
15862                                   "counters are not available");
15863 }
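
/*
 * Editor's illustrative application-side sketch (not part of the
 * driver): querying a rule's COUNT action, which is served by
 * flow_dv_query_count() above. Hits and bytes are reported as deltas
 * since the last reset; setting qc.reset makes this call the new
 * baseline. The flow is assumed to have been created with a COUNT
 * action.
 */
static __rte_unused int
example_query_flow_counter(uint16_t port_id, struct rte_flow *flow,
                           uint64_t *hits, uint64_t *bytes)
{
        static const struct rte_flow_action count_action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_query_count qc = { .reset = 1 };
        struct rte_flow_error error;
        int ret;

        ret = rte_flow_query(port_id, flow, &count_action, &qc, &error);
        if (ret)
                return ret;
        *hits = qc.hits_set ? qc.hits : 0;
        *bytes = qc.bytes_set ? qc.bytes : 0;
        return 0;
}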
15864
15865
15866 /**
15867  * Query counter's action pointer for a DV flow rule via DevX.
15868  *
15869  * @param[in] dev
15870  *   Pointer to Ethernet device.
15871  * @param[in] cnt_idx
15872  *   Index to the flow counter.
15873  * @param[out] action_ptr
15874  *   Action pointer for counter.
15875  * @param[out] error
15876  *   Perform verbose error reporting if not NULL.
15877  *
15878  * @return
15879  *   0 on success, a negative errno value otherwise and rte_errno is set.
15880  */
15881 int
15882 flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,
15883         void **action_ptr, struct rte_flow_error *error)
15884 {
15885         struct mlx5_priv *priv = dev->data->dev_private;
15886
15887         if (!priv->sh->cdev->config.devx || !action_ptr)
15888                 return rte_flow_error_set(error, ENOTSUP,
15889                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15890                                           NULL,
15891                                           "counters are not supported");
15892
15893         if (cnt_idx) {
15894                 struct mlx5_flow_counter *cnt = NULL;
15895                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15896                 if (cnt) {
15897                         *action_ptr = cnt->action;
15898                         return 0;
15899                 }
15900         }
15901         return rte_flow_error_set(error, EINVAL,
15902                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15903                                   NULL,
15904                                   "counters are not available");
15905 }
15906
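/**
 * Query an indirect action.
 * Dispatches on the action type encoded in the handle; AGE, COUNT and
 * CT objects are supported.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] handle
 *   The indirect action object handle to be queried.
 * @param[out] data
 *   Data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */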
15907 static int
15908 flow_dv_action_query(struct rte_eth_dev *dev,
15909                      const struct rte_flow_action_handle *handle, void *data,
15910                      struct rte_flow_error *error)
15911 {
15912         struct mlx5_age_param *age_param;
15913         struct rte_flow_query_age *resp;
15914         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15915         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15916         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15917         struct mlx5_priv *priv = dev->data->dev_private;
15918         struct mlx5_aso_ct_action *ct;
15919         uint16_t owner;
15920         uint32_t dev_idx;
15921
15922         switch (type) {
15923         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15924                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
15925                 resp = data;
15926                 resp->aged = __atomic_load_n(&age_param->state,
15927                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
15928                                                                           1 : 0;
15929                 resp->sec_since_last_hit_valid = !resp->aged;
15930                 if (resp->sec_since_last_hit_valid)
15931                         resp->sec_since_last_hit = __atomic_load_n
15932                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15933                 return 0;
15934         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15935                 return flow_dv_query_count(dev, idx, data, error);
15936         case MLX5_INDIRECT_ACTION_TYPE_CT:
15937                 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15938                 if (owner != PORT_ID(priv))
15939                         return rte_flow_error_set(error, EACCES,
15940                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15941                                         NULL,
15942                                         "CT object owned by another port");
15943                 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15944                 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15945                 MLX5_ASSERT(ct);
15946                 if (!ct->refcnt)
15947                         return rte_flow_error_set(error, EFAULT,
15948                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15949                                         NULL,
15950                                         "CT object is inactive");
15951                 ((struct rte_flow_action_conntrack *)data)->peer_port =
15952                                                         ct->peer;
15953                 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
15954                                                         ct->is_original;
15955                 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
15956                         return rte_flow_error_set(error, EIO,
15957                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15958                                         NULL,
15959                                         "Failed to query CT context");
15960                 return 0;
15961         default:
15962                 return rte_flow_error_set(error, ENOTSUP,
15963                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15964                                           "action type query not supported");
15965         }
15966 }
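
/*
 * Editor's illustrative application-side sketch (not part of the
 * driver): querying an indirect AGE action, served by the first case
 * of flow_dv_action_query() above. Once the action reports aged,
 * sec_since_last_hit is no longer meaningful, matching the
 * sec_since_last_hit_valid handling in the driver.
 */
static __rte_unused int
example_query_indirect_age(uint16_t port_id,
                           const struct rte_flow_action_handle *handle,
                           bool *aged)
{
        struct rte_flow_query_age resp;
        struct rte_flow_error error;
        int ret;

        ret = rte_flow_action_handle_query(port_id, handle, &resp, &error);
        if (ret)
                return ret;
        *aged = !!resp.aged;
        return 0;
}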
15967
15968 /**
15969  * Query a flow rule AGE action for aging information.
15970  *
15971  * @param[in] dev
15972  *   Pointer to Ethernet device.
15973  * @param[in] flow
15974  *   Pointer to the sub flow.
15975  * @param[out] data
15976  *   Data retrieved by the query.
15977  * @param[out] error
15978  *   Perform verbose error reporting if not NULL.
15979  *
15980  * @return
15981  *   0 on success, a negative errno value otherwise and rte_errno is set.
15982  */
15983 static int
15984 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15985                   void *data, struct rte_flow_error *error)
15986 {
15987         struct rte_flow_query_age *resp = data;
15988         struct mlx5_age_param *age_param;
15989
15990         if (flow->age) {
15991                 struct mlx5_aso_age_action *act =
15992                                      flow_aso_age_get_by_idx(dev, flow->age);
15993
15994                 age_param = &act->age_params;
15995         } else if (flow->counter) {
15996                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15997
15998                 if (!age_param || !age_param->timeout)
15999                         return rte_flow_error_set
16000                                         (error, EINVAL,
16001                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16002                                          NULL, "cannot read age data");
16003         } else {
16004                 return rte_flow_error_set(error, EINVAL,
16005                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16006                                           NULL, "age data not available");
16007         }
16008         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
16009                                      AGE_TMOUT ? 1 : 0;
16010         resp->sec_since_last_hit_valid = !resp->aged;
16011         if (resp->sec_since_last_hit_valid)
16012                 resp->sec_since_last_hit = __atomic_load_n
16013                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
16014         return 0;
16015 }
16016
16017 /**
16018  * Query a flow.
16019  *
16020  * @see rte_flow_query()
16021  * @see rte_flow_ops
16022  */
16023 static int
16024 flow_dv_query(struct rte_eth_dev *dev,
16025               struct rte_flow *flow,
16026               const struct rte_flow_action *actions,
16027               void *data,
16028               struct rte_flow_error *error)
16029 {
16030         int ret = -EINVAL;
16031
16032         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
16033                 switch (actions->type) {
16034                 case RTE_FLOW_ACTION_TYPE_VOID:
16035                         break;
16036                 case RTE_FLOW_ACTION_TYPE_COUNT:
16037                         ret = flow_dv_query_count(dev, flow->counter, data,
16038                                                   error);
16039                         break;
16040                 case RTE_FLOW_ACTION_TYPE_AGE:
16041                         ret = flow_dv_query_age(dev, flow, data, error);
16042                         break;
16043                 default:
16044                         return rte_flow_error_set(error, ENOTSUP,
16045                                                   RTE_FLOW_ERROR_TYPE_ACTION,
16046                                                   actions,
16047                                                   "action not supported");
16048                 }
16049         }
16050         return ret;
16051 }
16052
16053 /**
16054  * Destroy the meter table set.
16055  * Lock free (mutex should be acquired by the caller).
16056  *
16057  * @param[in] dev
16058  *   Pointer to Ethernet device.
16059  * @param[in] fm
16060  *   Meter information table.
16061  */
16062 static void
16063 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
16064                         struct mlx5_flow_meter_info *fm)
16065 {
16066         struct mlx5_priv *priv = dev->data->dev_private;
16067         int i;
16068
16069         if (!fm || !priv->sh->config.dv_flow_en)
16070                 return;
16071         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16072                 if (fm->drop_rule[i]) {
16073                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
16074                         fm->drop_rule[i] = NULL;
16075                 }
16076         }
16077 }
16078
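/**
 * Destroy the meter drop tables, lock free
 * (mutex should be acquired by the caller).
 * Releases the per-domain default rules and matchers, the color based
 * drop matchers and the drop tables themselves.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 */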
16079 static void
16080 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
16081 {
16082         struct mlx5_priv *priv = dev->data->dev_private;
16083         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16084         struct mlx5_flow_tbl_data_entry *tbl;
16085         int i, j;
16086
16087         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16088                 if (mtrmng->def_rule[i]) {
16089                         claim_zero(mlx5_flow_os_destroy_flow
16090                                         (mtrmng->def_rule[i]));
16091                         mtrmng->def_rule[i] = NULL;
16092                 }
16093                 if (mtrmng->def_matcher[i]) {
16094                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
16095                                 struct mlx5_flow_tbl_data_entry, tbl);
16096                         mlx5_list_unregister(tbl->matchers,
16097                                              &mtrmng->def_matcher[i]->entry);
16098                         mtrmng->def_matcher[i] = NULL;
16099                 }
16100                 for (j = 0; j < MLX5_REG_BITS; j++) {
16101                         if (mtrmng->drop_matcher[i][j]) {
16102                                 tbl =
16103                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
16104                                              struct mlx5_flow_tbl_data_entry,
16105                                              tbl);
16106                                 mlx5_list_unregister(tbl->matchers,
16107                                             &mtrmng->drop_matcher[i][j]->entry);
16108                                 mtrmng->drop_matcher[i][j] = NULL;
16109                         }
16110                 }
16111                 if (mtrmng->drop_tbl[i]) {
16112                         flow_dv_tbl_resource_release(MLX5_SH(dev),
16113                                 mtrmng->drop_tbl[i]);
16114                         mtrmng->drop_tbl[i] = NULL;
16115                 }
16116         }
16117 }
16118
16119 /* Number of meter flow actions: count and jump, or count and drop. */
16120 #define METER_ACTIONS 2
16121
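/**
 * Destroy the default meter policy of one domain,
 * releasing its sub policy rules and the policy itself.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] domain
 *   Meter domain whose default policy is destroyed.
 */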
16122 static void
16123 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
16124                                     enum mlx5_meter_domain domain)
16125 {
16126         struct mlx5_priv *priv = dev->data->dev_private;
16127         struct mlx5_flow_meter_def_policy *def_policy =
16128                         priv->sh->mtrmng->def_policy[domain];
16129
16130         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
16131         mlx5_free(def_policy);
16132         priv->sh->mtrmng->def_policy[domain] = NULL;
16133 }
16134
16135 /**
16136  * Destroy the default policy table set.
16137  *
16138  * @param[in] dev
16139  *   Pointer to Ethernet device.
16140  */
16141 static void
16142 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
16143 {
16144         struct mlx5_priv *priv = dev->data->dev_private;
16145         int i;
16146
16147         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
16148                 if (priv->sh->mtrmng->def_policy[i])
16149                         __flow_dv_destroy_domain_def_policy(dev,
16150                                         (enum mlx5_meter_domain)i);
16151         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
16152 }
16153
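/**
 * Create a meter policy flow rule matching one color value in the
 * color match register, optionally matching the source port in
 * E-Switch mode.
 *
 * @return
 *   0 on success, -1 otherwise.
 */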
16154 static int
16155 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
16156                         uint32_t color_reg_c_idx,
16157                         enum rte_color color, void *matcher_object,
16158                         int actions_n, void *actions,
16159                         bool match_src_port, const struct rte_flow_item *item,
16160                         void **rule, const struct rte_flow_attr *attr)
16161 {
16162         int ret;
16163         struct mlx5_flow_dv_match_params value = {
16164                 .size = sizeof(value.buf),
16165         };
16166         struct mlx5_flow_dv_match_params matcher = {
16167                 .size = sizeof(matcher.buf),
16168         };
16169         struct mlx5_priv *priv = dev->data->dev_private;
16170         uint8_t misc_mask;
16171
16172         if (match_src_port && priv->sh->esw_mode) {
16173                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
16174                                                    value.buf, item, attr)) {
16175                         DRV_LOG(ERR, "Failed to create meter policy%d flow's"
16176                                 " value with port.", color);
16177                         return -1;
16178                 }
16179         }
16180         flow_dv_match_meta_reg(matcher.buf, value.buf,
16181                                (enum modify_reg)color_reg_c_idx,
16182                                rte_col_2_mlx5_col(color), UINT32_MAX);
16183         misc_mask = flow_dv_matcher_enable(value.buf);
16184         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16185         ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value,
16186                                        actions_n, actions, rule);
16187         if (ret) {
16188                 DRV_LOG(ERR, "Failed to create meter policy flow for color %d.", color);
16189                 return -1;
16190         }
16191         return 0;
16192 }
16193
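/*
 * Register a matcher on the sub-policy table with the given priority,
 * masking the color register bits for non-RED priorities (and optionally
 * the source port).
 */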
16194 static int
16195 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
16196                         uint32_t color_reg_c_idx,
16197                         uint16_t priority,
16198                         struct mlx5_flow_meter_sub_policy *sub_policy,
16199                         const struct rte_flow_attr *attr,
16200                         bool match_src_port,
16201                         const struct rte_flow_item *item,
16202                         struct mlx5_flow_dv_matcher **policy_matcher,
16203                         struct rte_flow_error *error)
16204 {
16205         struct mlx5_list_entry *entry;
16206         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
16207         struct mlx5_flow_dv_matcher matcher = {
16208                 .mask = {
16209                         .size = sizeof(matcher.mask.buf),
16210                 },
16211                 .tbl = tbl_rsc,
16212         };
16213         struct mlx5_flow_dv_match_params value = {
16214                 .size = sizeof(value.buf),
16215         };
16216         struct mlx5_flow_cb_ctx ctx = {
16217                 .error = error,
16218                 .data = &matcher,
16219         };
16220         struct mlx5_flow_tbl_data_entry *tbl_data;
16221         struct mlx5_priv *priv = dev->data->dev_private;
16222         const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
16223
16224         if (match_src_port && priv->sh->esw_mode) {
16225                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
16226                                                    value.buf, item, attr)) {
16227                         DRV_LOG(ERR, "Failed to register meter policy matcher"
16228                                 " priority %d with port.", priority);
16229                         return -1;
16230                 }
16231         }
16232         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
16233         if (priority < RTE_COLOR_RED)
16234                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16235                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
16236         matcher.priority = priority;
16237         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
16238                                     matcher.mask.size);
16239         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16240         if (!entry) {
16241                 DRV_LOG(ERR, "Failed to register meter policy matcher.");
16242                 return -1;
16243         }
16244         *policy_matcher =
16245                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
16246         return 0;
16247 }
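
/*
 * Usage sketch (hypothetical, not called by the driver): creating one
 * policy rule is a matcher registration followed by a flow creation on
 * that matcher, as __flow_dv_create_domain_policy_rules() does per color
 * below. Error handling and cleanup are elided.
 */
static __rte_unused int
__flow_dv_policy_rule_sketch(struct rte_eth_dev *dev,
			     struct mlx5_flow_meter_sub_policy *sub_policy,
			     const struct rte_flow_attr *attr,
			     uint32_t color_reg_c_idx,
			     struct mlx5_meter_policy_acts *acts,
			     struct mlx5_sub_policy_color_rule *rule,
			     struct rte_flow_error *error)
{
	/* Register (or reuse) a matcher on the sub-policy table. */
	if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
			MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
			attr, false, NULL, &rule->matcher, error))
		return -1;
	/* Create the flow matching the green color on that matcher. */
	return __flow_dv_create_policy_flow(dev, color_reg_c_idx,
			RTE_COLOR_GREEN, rule->matcher->matcher_object,
			acts->actions_n, acts->dv_actions, false, NULL,
			&rule->rule, attr);
}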
16248
16249 /**
16250  * Create the policy rules per domain.
16251  *
16252  * @param[in] dev
16253  *   Pointer to Ethernet device.
16254  * @param[in] sub_policy
16255  *   Pointer to the sub policy table.
16256  * @param[in] egress
16257  *   Direction of the table.
16258  * @param[in] transfer
16259  *   E-Switch or NIC flow.
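 * @param[in] match_src_port
 *   Whether to match on the source port.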
16260  * @param[in] acts
16261  *   Pointer to policy action list per color.
16262  *
16263  * @return
16264  *   0 on success, -1 otherwise.
16265  */
16266 static int
16267 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
16268                 struct mlx5_flow_meter_sub_policy *sub_policy,
16269                 uint8_t egress, uint8_t transfer, bool match_src_port,
16270                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
16271 {
16272         struct mlx5_priv *priv = dev->data->dev_private;
16273         struct rte_flow_error flow_err;
16274         uint32_t color_reg_c_idx;
16275         struct rte_flow_attr attr = {
16276                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16277                 .priority = 0,
16278                 .ingress = 0,
16279                 .egress = !!egress,
16280                 .transfer = !!transfer,
16281                 .reserved = 0,
16282         };
16283         int i;
16284         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
16285         struct mlx5_sub_policy_color_rule *color_rule;
16286         bool svport_match;
16287         struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL};
16288
16289         if (ret < 0)
16290                 return -1;
16291         /* Create policy table with POLICY level. */
16292         if (!sub_policy->tbl_rsc)
16293                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
16294                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
16295                                 egress, transfer, false, NULL, 0, 0,
16296                                 sub_policy->idx, &flow_err);
16297         if (!sub_policy->tbl_rsc) {
16298                 DRV_LOG(ERR,
16299                         "Failed to create meter sub policy table.");
16300                 return -1;
16301         }
16302         /* Prepare matchers. */
16303         color_reg_c_idx = ret;
16304         for (i = 0; i < RTE_COLORS; i++) {
16305                 TAILQ_INIT(&sub_policy->color_rules[i]);
16306                 if (!acts[i].actions_n)
16307                         continue;
16308                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16309                                 sizeof(struct mlx5_sub_policy_color_rule),
16310                                 0, SOCKET_ID_ANY);
16311                 if (!color_rule) {
16312                         DRV_LOG(ERR, "No memory to create color rule.");
16313                         goto err_exit;
16314                 }
16315                 tmp_rules[i] = color_rule;
16316                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16317                                   color_rule, next_port);
16318                 color_rule->src_port = priv->representor_id;
16319                 /* The priority in the attribute is not used. */
16320                 attr.priority = i;
16321                 /* Create matchers for colors. */
16322                 svport_match = (i != RTE_COLOR_RED) ? match_src_port : false;
16323                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16324                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
16325                                 &attr, svport_match, NULL,
16326                                 &color_rule->matcher, &flow_err)) {
16327                         DRV_LOG(ERR, "Failed to create color%u matcher.", i);
16328                         goto err_exit;
16329                 }
16330                 /* Create flow, matching color. */
16331                 if (__flow_dv_create_policy_flow(dev,
16332                                 color_reg_c_idx, (enum rte_color)i,
16333                                 color_rule->matcher->matcher_object,
16334                                 acts[i].actions_n, acts[i].dv_actions,
16335                                 svport_match, NULL, &color_rule->rule,
16336                                 &attr)) {
16337                         DRV_LOG(ERR, "Failed to create color%u rule.", i);
16338                         goto err_exit;
16339                 }
16340         }
16341         return 0;
16342 err_exit:
16343         /* All the policy rules will be cleared. */
16344         do {
16345                 color_rule = tmp_rules[i];
16346                 if (color_rule) {
16347                         if (color_rule->rule)
16348                                 mlx5_flow_os_destroy_flow(color_rule->rule);
16349                         if (color_rule->matcher) {
16350                                 struct mlx5_flow_tbl_data_entry *tbl =
16351                                         container_of(color_rule->matcher->tbl,
16352                                                      typeof(*tbl), tbl);
16353                                 mlx5_list_unregister(tbl->matchers,
16354                                                 &color_rule->matcher->entry);
16355                         }
16356                         TAILQ_REMOVE(&sub_policy->color_rules[i],
16357                                      color_rule, next_port);
16358                         mlx5_free(color_rule);
16359                 }
16360         } while (i--);
16361         return -1;
16362 }
16363
16364 static int
16365 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
16366                         struct mlx5_flow_meter_policy *mtr_policy,
16367                         struct mlx5_flow_meter_sub_policy *sub_policy,
16368                         uint32_t domain)
16369 {
16370         struct mlx5_priv *priv = dev->data->dev_private;
16371         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16372         struct mlx5_flow_dv_tag_resource *tag;
16373         struct mlx5_flow_dv_port_id_action_resource *port_action;
16374         struct mlx5_hrxq *hrxq;
16375         struct mlx5_flow_meter_info *next_fm = NULL;
16376         struct mlx5_flow_meter_policy *next_policy;
16377         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16378         struct mlx5_flow_tbl_data_entry *tbl_data;
16379         struct rte_flow_error error;
16380         uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16381         uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16382         bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
16383         bool match_src_port = false;
16384         int i;
16385
16386         /* If RSS or Queue, no previous actions / rules are created. */
16387         for (i = 0; i < RTE_COLORS; i++) {
16388                 acts[i].actions_n = 0;
16389                 if (i == RTE_COLOR_RED) {
16390                         /* Only drop is supported on red. */
16391                         acts[i].dv_actions[0] =
16392                                 mtr_policy->dr_drop_action[domain];
16393                         acts[i].actions_n = 1;
16394                         continue;
16395                 }
16396                 if (i == RTE_COLOR_GREEN &&
16397                     mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
16398                         struct rte_flow_attr attr = {
16399                                 .transfer = transfer
16400                         };
16401
16402                         next_fm = mlx5_flow_meter_find(priv,
16403                                         mtr_policy->act_cnt[i].next_mtr_id,
16404                                         NULL);
16405                         if (!next_fm) {
16406                                 DRV_LOG(ERR,
16407                                         "Failed to get next hierarchy meter.");
16408                                 goto err_exit;
16409                         }
16410                         if (mlx5_flow_meter_attach(priv, next_fm,
16411                                                    &attr, &error)) {
16412                                 DRV_LOG(ERR, "%s", error.message);
16413                                 next_fm = NULL;
16414                                 goto err_exit;
16415                         }
16416                         /* Meter action must be the first one for TX. */
16417                         if (mtr_first) {
16418                                 acts[i].dv_actions[acts[i].actions_n] =
16419                                         next_fm->meter_action;
16420                                 acts[i].actions_n++;
16421                         }
16422                 }
16423                 if (mtr_policy->act_cnt[i].rix_mark) {
16424                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
16425                                         mtr_policy->act_cnt[i].rix_mark);
16426                         if (!tag) {
16427                                 DRV_LOG(ERR, "Failed to find "
16428                                 "mark action for policy.");
16429                                 goto err_exit;
16430                         }
16431                         acts[i].dv_actions[acts[i].actions_n] = tag->action;
16432                         acts[i].actions_n++;
16433                 }
16434                 if (mtr_policy->act_cnt[i].modify_hdr) {
16435                         acts[i].dv_actions[acts[i].actions_n] =
16436                                 mtr_policy->act_cnt[i].modify_hdr->action;
16437                         acts[i].actions_n++;
16438                 }
16439                 if (mtr_policy->act_cnt[i].fate_action) {
16440                         switch (mtr_policy->act_cnt[i].fate_action) {
16441                         case MLX5_FLOW_FATE_PORT_ID:
16442                                 port_action = mlx5_ipool_get
16443                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
16444                                 mtr_policy->act_cnt[i].rix_port_id_action);
16445                                 if (!port_action) {
16446                                         DRV_LOG(ERR, "Failed to find "
16447                                                 "port action for policy.");
16448                                         goto err_exit;
16449                                 }
16450                                 acts[i].dv_actions[acts[i].actions_n] =
16451                                         port_action->action;
16452                                 acts[i].actions_n++;
16453                                 mtr_policy->dev = dev;
16454                                 match_src_port = true;
16455                                 break;
16456                         case MLX5_FLOW_FATE_DROP:
16457                         case MLX5_FLOW_FATE_JUMP:
16458                                 acts[i].dv_actions[acts[i].actions_n] =
16459                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
16460                                 acts[i].actions_n++;
16461                                 break;
16462                         case MLX5_FLOW_FATE_SHARED_RSS:
16463                         case MLX5_FLOW_FATE_QUEUE:
16464                                 hrxq = mlx5_ipool_get
16465                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
16466                                          sub_policy->rix_hrxq[i]);
16467                                 if (!hrxq) {
16468                                         DRV_LOG(ERR, "Failed to find "
16469                                                 "queue action for policy.");
16470                                         goto err_exit;
16471                                 }
16472                                 acts[i].dv_actions[acts[i].actions_n] =
16473                                         hrxq->action;
16474                                 acts[i].actions_n++;
16475                                 break;
16476                         case MLX5_FLOW_FATE_MTR:
16477                                 if (!next_fm) {
16478                                         DRV_LOG(ERR,
16479                                                 "No next hierarchy meter.");
16480                                         goto err_exit;
16481                                 }
16482                                 if (!mtr_first) {
16483                                         acts[i].dv_actions[acts[i].actions_n] =
16484                                                         next_fm->meter_action;
16485                                         acts[i].actions_n++;
16486                                 }
16487                                 if (mtr_policy->act_cnt[i].next_sub_policy) {
16488                                         next_sub_policy =
16489                                         mtr_policy->act_cnt[i].next_sub_policy;
16490                                 } else {
16491                                         next_policy =
16492                                                 mlx5_flow_meter_policy_find(dev,
16493                                                 next_fm->policy_id, NULL);
16494                                         MLX5_ASSERT(next_policy);
16495                                         next_sub_policy =
16496                                         next_policy->sub_policys[domain][0];
16497                                 }
16498                                 tbl_data =
16499                                         container_of(next_sub_policy->tbl_rsc,
16500                                         struct mlx5_flow_tbl_data_entry, tbl);
16501                                 acts[i].dv_actions[acts[i].actions_n++] =
16502                                                         tbl_data->jump.action;
16503                                 if (mtr_policy->act_cnt[i].modify_hdr)
16504                                         match_src_port = !!transfer;
16505                                 break;
16506                         default:
16507                         /* Queue action: do nothing. */
16508                                 break;
16509                         }
16510                 }
16511         }
16512         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
16513                                 egress, transfer, match_src_port, acts)) {
16514                 DRV_LOG(ERR,
16515                         "Failed to create policy rules per domain.");
16516                 goto err_exit;
16517         }
16518         return 0;
16519 err_exit:
16520         if (next_fm)
16521                 mlx5_flow_meter_detach(priv, next_fm);
16522         return -1;
16523 }
16524
16525 /**
16526  * Create the policy rules.
16527  *
16528  * @param[in] dev
16529  *   Pointer to Ethernet device.
16530  * @param[in,out] mtr_policy
16531  *   Pointer to meter policy table.
16532  *
16533  * @return
16534  *   0 on success, -1 otherwise.
16535  */
16536 static int
16537 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
16538                              struct mlx5_flow_meter_policy *mtr_policy)
16539 {
16540         int i;
16541         uint16_t sub_policy_num;
16542
16543         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16544                 sub_policy_num = (mtr_policy->sub_policy_num >>
16545                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16546                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16547                 if (!sub_policy_num)
16548                         continue;
16549                 /* Prepare actions list and create policy rules. */
16550                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16551                         mtr_policy->sub_policys[i][0], i)) {
16552                         DRV_LOG(ERR, "Failed to create policy action "
16553                                 "list per domain.");
16554                         return -1;
16555                 }
16556         }
16557         return 0;
16558 }
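
/*
 * Illustrative helpers (hypothetical, not used by the driver): the
 * per-domain sub-policy counts are packed into the single 32-bit
 * sub_policy_num field, MLX5_MTR_SUB_POLICY_NUM_SHIFT bits per domain,
 * exactly as the loop above decodes them.
 */
static __rte_unused uint16_t
__mtr_sub_policy_num_get_sketch(uint32_t packed, int domain)
{
	return (packed >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
	       MLX5_MTR_SUB_POLICY_NUM_MASK;
}

static __rte_unused uint32_t
__mtr_sub_policy_num_set_sketch(uint32_t packed, int domain, uint16_t num)
{
	packed &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
		    (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
	return packed | (((uint32_t)(num & MLX5_MTR_SUB_POLICY_NUM_MASK)) <<
			 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
}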
16559
16560 static int
16561 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
16562 {
16563         struct mlx5_priv *priv = dev->data->dev_private;
16564         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16565         struct mlx5_flow_meter_def_policy *def_policy;
16566         struct mlx5_flow_tbl_resource *jump_tbl;
16567         struct mlx5_flow_tbl_data_entry *tbl_data;
16568         uint8_t egress, transfer;
16569         struct rte_flow_error error;
16570         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16571         int ret;
16572
16573         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16574         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16575         def_policy = mtrmng->def_policy[domain];
16576         if (!def_policy) {
16577                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
16578                         sizeof(struct mlx5_flow_meter_def_policy),
16579                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
16580                 if (!def_policy) {
16581                         DRV_LOG(ERR, "Failed to alloc default policy table.");
16582                         goto def_policy_error;
16583                 }
16584                 mtrmng->def_policy[domain] = def_policy;
16585                 /* Create the meter suffix table with SUFFIX level. */
16586                 jump_tbl = flow_dv_tbl_resource_get(dev,
16587                                 MLX5_FLOW_TABLE_LEVEL_METER,
16588                                 egress, transfer, false, NULL, 0,
16589                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16590                 if (!jump_tbl) {
16591                         DRV_LOG(ERR,
16592                                 "Failed to create meter suffix table.");
16593                         goto def_policy_error;
16594                 }
16595                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
16596                 tbl_data = container_of(jump_tbl,
16597                                         struct mlx5_flow_tbl_data_entry, tbl);
16598                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
16599                                                 tbl_data->jump.action;
16600                 acts[RTE_COLOR_GREEN].dv_actions[0] = tbl_data->jump.action;
16601                 acts[RTE_COLOR_GREEN].actions_n = 1;
16602                 /*
16603                  * YELLOW has the same default policy as GREEN:
16604                  * they share the same table and action. Getting the
16605                  * table resource a second time only updates the
16606                  * reference count for the releasing stage.
16607                  */
16608                 jump_tbl = flow_dv_tbl_resource_get(dev,
16609                                 MLX5_FLOW_TABLE_LEVEL_METER,
16610                                 egress, transfer, false, NULL, 0,
16611                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16612                 if (!jump_tbl) {
16613                         DRV_LOG(ERR,
16614                                 "Failed to get meter suffix table.");
16615                         goto def_policy_error;
16616                 }
16617                 def_policy->sub_policy.jump_tbl[RTE_COLOR_YELLOW] = jump_tbl;
16618                 tbl_data = container_of(jump_tbl,
16619                                         struct mlx5_flow_tbl_data_entry, tbl);
16620                 def_policy->dr_jump_action[RTE_COLOR_YELLOW] =
16621                                                 tbl_data->jump.action;
16622                 acts[RTE_COLOR_YELLOW].dv_actions[0] = tbl_data->jump.action;
16623                 acts[RTE_COLOR_YELLOW].actions_n = 1;
16624                 /* Create jump action to the drop table. */
16625                 if (!mtrmng->drop_tbl[domain]) {
16626                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
16627                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
16628                                  egress, transfer, false, NULL, 0,
16629                                  0, MLX5_MTR_TABLE_ID_DROP, &error);
16630                         if (!mtrmng->drop_tbl[domain]) {
16631                                 DRV_LOG(ERR, "Failed to create meter "
16632                                         "drop table for default policy.");
16633                                 goto def_policy_error;
16634                         }
16635                 }
16636                 /* All RED traffic jumps to the unique drop table. */
16637                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16638                                         struct mlx5_flow_tbl_data_entry, tbl);
16639                 def_policy->dr_jump_action[RTE_COLOR_RED] =
16640                                                 tbl_data->jump.action;
16641                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
16642                 acts[RTE_COLOR_RED].actions_n = 1;
16643                 /* Create default policy rules. */
16644                 ret = __flow_dv_create_domain_policy_rules(dev,
16645                                         &def_policy->sub_policy,
16646                                         egress, transfer, false, acts);
16647                 if (ret) {
16648                         DRV_LOG(ERR, "Failed to create default policy rules.");
16649                         goto def_policy_error;
16650                 }
16651         }
16652         return 0;
16653 def_policy_error:
16654         __flow_dv_destroy_domain_def_policy(dev,
16655                                             (enum mlx5_meter_domain)domain);
16656         return -1;
16657 }
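
/*
 * Default-policy layout per domain, as built above:
 *   GREEN  -> jump to the meter suffix table;
 *   YELLOW -> jump to the same suffix table (the second get only
 *             increments its reference count);
 *   RED    -> jump to the per-domain meter drop table.
 */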
16658
16659 /**
16660  * Create the default policy table set.
16661  *
16662  * @param[in] dev
16663  *   Pointer to Ethernet device.
16664  * @return
16665  *   0 on success, -1 otherwise.
16666  */
16667 static int
16668 flow_dv_create_def_policy(struct rte_eth_dev *dev)
16669 {
16670         struct mlx5_priv *priv = dev->data->dev_private;
16671         int i;
16672
16673         /* Non-termination policy table. */
16674         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16675                 if (!priv->sh->config.dv_esw_en &&
16676                     i == MLX5_MTR_DOMAIN_TRANSFER)
16677                         continue;
16678                 if (__flow_dv_create_domain_def_policy(dev, i)) {
16679                         DRV_LOG(ERR, "Failed to create default policy");
16680                         /* Rollback the created default policies for others. */
16681                         flow_dv_destroy_def_policy(dev);
16682                         return -1;
16683                 }
16684         }
16685         return 0;
16686 }
16687
16688 /**
16689  * Create the needed meter tables.
16690  * Lock-free (the mutex should be acquired by the caller).
16691  *
16692  * @param[in] dev
16693  *   Pointer to Ethernet device.
16694  * @param[in] fm
16695  *   Meter information table.
16696  * @param[in] mtr_idx
16697  *   Meter index.
16698  * @param[in] domain_bitmap
16699  *   Domain bitmap.
16700  * @return
16701  *   0 on success, -1 otherwise.
16702  */
16703 static int
16704 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
16705                         struct mlx5_flow_meter_info *fm,
16706                         uint32_t mtr_idx,
16707                         uint8_t domain_bitmap)
16708 {
16709         struct mlx5_priv *priv = dev->data->dev_private;
16710         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16711         struct rte_flow_error error;
16712         struct mlx5_flow_tbl_data_entry *tbl_data;
16713         uint8_t egress, transfer;
16714         void *actions[METER_ACTIONS];
16715         int domain, ret, i;
16716         struct mlx5_flow_counter *cnt;
16717         struct mlx5_flow_dv_match_params value = {
16718                 .size = sizeof(value.buf),
16719         };
16720         struct mlx5_flow_dv_match_params matcher_para = {
16721                 .size = sizeof(matcher_para.buf),
16722         };
16723         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
16724                                                      0, &error);
16725         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
16726         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
16727         struct mlx5_list_entry *entry;
16728         struct mlx5_flow_dv_matcher matcher = {
16729                 .mask = {
16730                         .size = sizeof(matcher.mask.buf),
16731                 },
16732         };
16733         struct mlx5_flow_dv_matcher *drop_matcher;
16734         struct mlx5_flow_cb_ctx ctx = {
16735                 .error = &error,
16736                 .data = &matcher,
16737         };
16738         uint8_t misc_mask;
16739
16740         if (!priv->mtr_en || mtr_id_reg_c < 0) {
16741                 rte_errno = ENOTSUP;
16742                 return -1;
16743         }
16744         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
16745                 if (!(domain_bitmap & (1 << domain)) ||
16746                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
16747                         continue;
16748                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16749                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16750                 /* Create the drop table with METER DROP level. */
16751                 if (!mtrmng->drop_tbl[domain]) {
16752                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
16753                                         MLX5_FLOW_TABLE_LEVEL_METER,
16754                                         egress, transfer, false, NULL, 0,
16755                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
16756                         if (!mtrmng->drop_tbl[domain]) {
16757                                 DRV_LOG(ERR, "Failed to create meter drop table.");
16758                                 goto policy_error;
16759                         }
16760                 }
16761                 /* Create default matcher in drop table. */
16762                 matcher.tbl = mtrmng->drop_tbl[domain];
16763                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16764                                 struct mlx5_flow_tbl_data_entry, tbl);
16765                 if (!mtrmng->def_matcher[domain]) {
16766                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16767                                        (enum modify_reg)mtr_id_reg_c,
16768                                        0, 0);
16769                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
16770                         matcher.crc = rte_raw_cksum
16771                                         ((const void *)matcher.mask.buf,
16772                                         matcher.mask.size);
16773                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16774                         if (!entry) {
16775                                 DRV_LOG(ERR, "Failed to register meter "
16776                                 "drop default matcher.");
16777                                 goto policy_error;
16778                         }
16779                         mtrmng->def_matcher[domain] = container_of(entry,
16780                         struct mlx5_flow_dv_matcher, entry);
16781                 }
16782                 /* Create default rule in drop table. */
16783                 if (!mtrmng->def_rule[domain]) {
16784                         i = 0;
16785                         actions[i++] = priv->sh->dr_drop_action;
16786                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16787                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
16788                         misc_mask = flow_dv_matcher_enable(value.buf);
16789                         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16790                         ret = mlx5_flow_os_create_flow
16791                                 (mtrmng->def_matcher[domain]->matcher_object,
16792                                 (void *)&value, i, actions,
16793                                 &mtrmng->def_rule[domain]);
16794                         if (ret) {
16795                                 DRV_LOG(ERR, "Failed to create meter "
16796                                 "default drop rule for drop table.");
16797                                 goto policy_error;
16798                         }
16799                 }
16800                 if (!fm->drop_cnt)
16801                         continue;
16802                 MLX5_ASSERT(mtrmng->max_mtr_bits);
16803                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
16804                         /* Create matchers for Drop. */
16805                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16806                                         (enum modify_reg)mtr_id_reg_c, 0,
16807                                         (mtr_id_mask << mtr_id_offset));
16808                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
16809                         matcher.crc = rte_raw_cksum
16810                                         ((const void *)matcher.mask.buf,
16811                                         matcher.mask.size);
16812                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16813                         if (!entry) {
16814                                 DRV_LOG(ERR,
16815                                 "Failed to register meter drop matcher.");
16816                                 goto policy_error;
16817                         }
16818                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
16819                                 container_of(entry, struct mlx5_flow_dv_matcher,
16820                                              entry);
16821                 }
16822                 drop_matcher =
16823                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
16824                 /* Create drop rule, matching meter_id only. */
16825                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16826                                 (enum modify_reg)mtr_id_reg_c,
16827                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
16828                 i = 0;
16829                 cnt = flow_dv_counter_get_by_idx(dev,
16830                                         fm->drop_cnt, NULL);
16831                 actions[i++] = cnt->action;
16832                 actions[i++] = priv->sh->dr_drop_action;
16833                 misc_mask = flow_dv_matcher_enable(value.buf);
16834                 __flow_dv_adjust_buf_size(&value.size, misc_mask);
16835                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
16836                                                (void *)&value, i, actions,
16837                                                &fm->drop_rule[domain]);
16838                 if (ret) {
16839                         DRV_LOG(ERR, "Failed to create meter "
16840                                 "drop rule for drop table.");
16841                         goto policy_error;
16842                 }
16843         }
16844         return 0;
16845 policy_error:
16846         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16847                 if (fm->drop_rule[i]) {
16848                         claim_zero(mlx5_flow_os_destroy_flow
16849                                 (fm->drop_rule[i]));
16850                         fm->drop_rule[i] = NULL;
16851                 }
16852         }
16853         return -1;
16854 }
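
/*
 * Layout sketch (hypothetical helper mirroring the code above): when the
 * meter register is shared (priv->mtr_reg_share), the low
 * MLX5_MTR_COLOR_BITS of the REG_C carry the color and the meter index
 * sits above them; otherwise the meter index starts at bit 0.
 */
static __rte_unused uint32_t
__mtr_id_reg_value_sketch(uint32_t mtr_idx, bool reg_share)
{
	uint8_t mtr_id_offset = reg_share ? MLX5_MTR_COLOR_BITS : 0;

	return mtr_idx << mtr_id_offset;
}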
16855
16856 static struct mlx5_flow_meter_sub_policy *
16857 __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
16858                 struct mlx5_flow_meter_policy *mtr_policy,
16859                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
16860                 struct mlx5_flow_meter_sub_policy *next_sub_policy,
16861                 bool *is_reuse)
16862 {
16863         struct mlx5_priv *priv = dev->data->dev_private;
16864         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16865         uint32_t sub_policy_idx = 0;
16866         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
16867         uint32_t i, j;
16868         struct mlx5_hrxq *hrxq;
16869         struct mlx5_flow_handle dh;
16870         struct mlx5_meter_policy_action_container *act_cnt;
16871         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16872         uint16_t sub_policy_num;
16873         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
16874
16875         MLX5_ASSERT(wks);
16876         rte_spinlock_lock(&mtr_policy->sl);
16877         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16878                 if (!rss_desc[i])
16879                         continue;
16880                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
16881                 if (!hrxq_idx[i]) {
16882                         rte_spinlock_unlock(&mtr_policy->sl);
16883                         return NULL;
16884                 }
16885         }
16886         sub_policy_num = (mtr_policy->sub_policy_num >>
16887                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16888                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16889         for (j = 0; j < sub_policy_num; j++) {
16890                 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16891                         if (rss_desc[i] &&
16892                             hrxq_idx[i] !=
16893                             mtr_policy->sub_policys[domain][j]->rix_hrxq[i])
16894                                 break;
16895                 }
16896                 if (i >= MLX5_MTR_RTE_COLORS) {
16897                         /*
16898                          * Found the sub policy table with
16899                          * the same queue per color.
16900                          */
16901                         rte_spinlock_unlock(&mtr_policy->sl);
16902                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16903                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16904                         *is_reuse = true;
16905                         return mtr_policy->sub_policys[domain][j];
16906                 }
16907         }
16908         /* Create sub policy. */
16909         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
16910                 /* Reuse the first pre-allocated sub_policy. */
16911                 sub_policy = mtr_policy->sub_policys[domain][0];
16912                 sub_policy_idx = sub_policy->idx;
16913         } else {
16914                 sub_policy = mlx5_ipool_zmalloc
16915                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16916                                  &sub_policy_idx);
16917                 if (!sub_policy ||
16918                     sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
16919                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16920                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16921                         goto rss_sub_policy_error;
16922                 }
16923                 sub_policy->idx = sub_policy_idx;
16924                 sub_policy->main_policy = mtr_policy;
16925         }
16926         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16927                 if (!rss_desc[i])
16928                         continue;
16929                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
16930                 if (mtr_policy->is_hierarchy) {
16931                         act_cnt = &mtr_policy->act_cnt[i];
16932                         act_cnt->next_sub_policy = next_sub_policy;
16933                         mlx5_hrxq_release(dev, hrxq_idx[i]);
16934                 } else {
16935                         /*
16936                          * Overwrite the last action, replacing the
16937                          * RSS action with a Queue action.
16938                          */
16939                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
16940                                               hrxq_idx[i]);
16941                         if (!hrxq) {
16942                                 DRV_LOG(ERR, "Failed to get policy hrxq");
16943                                 goto rss_sub_policy_error;
16944                         }
16945                         act_cnt = &mtr_policy->act_cnt[i];
16946                         if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16947                                 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16948                                 if (act_cnt->rix_mark)
16949                                         wks->mark = 1;
16950                                 dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16951                                 dh.rix_hrxq = hrxq_idx[i];
16952                                 flow_drv_rxq_flags_set(dev, &dh);
16953                         }
16954                 }
16955         }
16956         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16957                                                sub_policy, domain)) {
16958                 DRV_LOG(ERR, "Failed to create policy "
16959                         "rules for ingress domain.");
16960                 goto rss_sub_policy_error;
16961         }
16962         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16963                 i = (mtr_policy->sub_policy_num >>
16964                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16965                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16966                 if (i >= MLX5_MTR_RSS_MAX_SUB_POLICY) {
16967                         DRV_LOG(ERR, "No free sub-policy slot.");
16968                         goto rss_sub_policy_error;
16969                 }
16970                 mtr_policy->sub_policys[domain][i] = sub_policy;
16971                 i++;
16972                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16973                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16974                 mtr_policy->sub_policy_num |=
16975                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16976                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16977         }
16978         rte_spinlock_unlock(&mtr_policy->sl);
16979         *is_reuse = false;
16980         return sub_policy;
16981 rss_sub_policy_error:
16982         if (sub_policy) {
16983                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16984                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16985                         i = (mtr_policy->sub_policy_num >>
16986                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16987                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16988                         mtr_policy->sub_policys[domain][i] = NULL;
16989                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16990                                         sub_policy->idx);
16991                 }
16992         }
16993         rte_spinlock_unlock(&mtr_policy->sl);
16994         return NULL;
16995 }
16996
16997 /**
16998  * Find the policy table for the prefix table with RSS.
16999  *
17000  * @param[in] dev
17001  *   Pointer to Ethernet device.
17002  * @param[in] mtr_policy
17003  *   Pointer to meter policy table.
17004  * @param[in] rss_desc
17005  *   Pointer to the RSS descriptor.
17006  * @return
17007  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
17008  */
17009 static struct mlx5_flow_meter_sub_policy *
17010 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
17011                 struct mlx5_flow_meter_policy *mtr_policy,
17012                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
17013 {
17014         struct mlx5_priv *priv = dev->data->dev_private;
17015         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17016         struct mlx5_flow_meter_info *next_fm;
17017         struct mlx5_flow_meter_policy *next_policy;
17018         struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
17019         struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
17020         struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
17021         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17022         bool reuse_sub_policy;
17023         uint32_t i = 0;
17024         uint32_t j = 0;
17025
17026         while (true) {
17027                 /* Walk the hierarchy to collect all of its policies. */
17028                 policies[i++] = mtr_policy;
17029                 if (!mtr_policy->is_hierarchy)
17030                         break;
17031                 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
17032                         DRV_LOG(ERR, "Exceeded max meter number in hierarchy.");
17033                         return NULL;
17034                 }
17035                 next_fm = mlx5_flow_meter_find(priv,
17036                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
17037                 if (!next_fm) {
17038                         DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
17039                         return NULL;
17040                 }
17041                 next_policy =
17042                         mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
17043                                                     NULL);
17044                 MLX5_ASSERT(next_policy);
17045                 mtr_policy = next_policy;
17046         }
17047         while (i) {
17048                 /*
17049                  * From the last policy to the first one in the hierarchy,
17050                  * create or get the sub policy for each of them.
17051                  */
17052                 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
17053                                                         policies[--i],
17054                                                         rss_desc,
17055                                                         next_sub_policy,
17056                                                         &reuse_sub_policy);
17057                 if (!sub_policy) {
17058                         DRV_LOG(ERR, "Failed to get the sub policy.");
17059                         goto err_exit;
17060                 }
17061                 if (!reuse_sub_policy)
17062                         sub_policies[j++] = sub_policy;
17063                 next_sub_policy = sub_policy;
17064         }
17065         return sub_policy;
17066 err_exit:
17067         while (j) {
17068                 uint16_t sub_policy_num;
17069
17070                 sub_policy = sub_policies[--j];
17071                 mtr_policy = sub_policy->main_policy;
17072                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
17073                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
17074                         sub_policy_num = (mtr_policy->sub_policy_num >>
17075                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17076                                 MLX5_MTR_SUB_POLICY_NUM_MASK;
17077                         mtr_policy->sub_policys[domain][sub_policy_num - 1] =
17078                                                                         NULL;
17079                         sub_policy_num--;
17080                         mtr_policy->sub_policy_num &=
17081                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17082                                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17083                         mtr_policy->sub_policy_num |=
17084                         (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17085                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17086                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17087                                         sub_policy->idx);
17088                 }
17089         }
17090         return NULL;
17091 }
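
/*
 * Note on the walk above: the policies are collected top-down into a
 * local array first, then the sub-policies are created bottom-up so that
 * each level can jump to the table of the level below it.
 */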
17092
17093 /**
17094  * Create the sub policy tag rule for all meters in hierarchy.
17095  *
17096  * @param[in] dev
17097  *   Pointer to Ethernet device.
17098  * @param[in] fm
17099  *   Meter information table.
17100  * @param[in] src_port
17101  *   The src port this extra rule should use.
17102  * @param[in] item
17103  *   The src port match item.
17104  * @param[out] error
17105  *   Perform verbose error reporting if not NULL.
17106  * @return
17107  *   0 on success, a negative errno value otherwise and rte_errno is set.
17108  */
17109 static int
17110 flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
17111                                 struct mlx5_flow_meter_info *fm,
17112                                 int32_t src_port,
17113                                 const struct rte_flow_item *item,
17114                                 struct rte_flow_error *error)
17115 {
17116         struct mlx5_priv *priv = dev->data->dev_private;
17117         struct mlx5_flow_meter_policy *mtr_policy;
17118         struct mlx5_flow_meter_sub_policy *sub_policy;
17119         struct mlx5_flow_meter_info *next_fm = NULL;
17120         struct mlx5_flow_meter_policy *next_policy;
17121         struct mlx5_flow_meter_sub_policy *next_sub_policy;
17122         struct mlx5_flow_tbl_data_entry *tbl_data;
17123         struct mlx5_sub_policy_color_rule *color_rule;
17124         struct mlx5_meter_policy_acts acts;
17125         uint32_t color_reg_c_idx;
17126         bool mtr_first = (src_port != UINT16_MAX);
17127         struct rte_flow_attr attr = {
17128                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
17129                 .priority = 0,
17130                 .ingress = 0,
17131                 .egress = 0,
17132                 .transfer = 1,
17133                 .reserved = 0,
17134         };
17135         uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
17136         int i;
17137
17138         mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17139         MLX5_ASSERT(mtr_policy);
17140         if (!mtr_policy->is_hierarchy)
17141                 return 0;
17142         next_fm = mlx5_flow_meter_find(priv,
17143                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
17144         if (!next_fm) {
17145                 return rte_flow_error_set(error, EINVAL,
17146                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
17147                                 "Failed to find next meter in hierarchy.");
17148         }
17149         if (!next_fm->drop_cnt)
17150                 goto exit;
17151         color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
17152         sub_policy = mtr_policy->sub_policys[domain][0];
17153         for (i = 0; i < RTE_COLORS; i++) {
17154                 bool rule_exist = false;
17155                 struct mlx5_meter_policy_action_container *act_cnt;
17156
17157                 if (i >= RTE_COLOR_YELLOW)
17158                         break;
17159                 TAILQ_FOREACH(color_rule,
17160                               &sub_policy->color_rules[i], next_port)
17161                         if (color_rule->src_port == src_port) {
17162                                 rule_exist = true;
17163                                 break;
17164                         }
17165                 if (rule_exist)
17166                         continue;
17167                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
17168                                 sizeof(struct mlx5_sub_policy_color_rule),
17169                                 0, SOCKET_ID_ANY);
17170                 if (!color_rule)
17171                         return rte_flow_error_set(error, ENOMEM,
17172                                 RTE_FLOW_ERROR_TYPE_ACTION,
17173                                 NULL, "No memory to create tag color rule.");
17174                 color_rule->src_port = src_port;
17175                 attr.priority = i;
17176                 next_policy = mlx5_flow_meter_policy_find(dev,
17177                                                 next_fm->policy_id, NULL);
17178                 MLX5_ASSERT(next_policy);
17179                 next_sub_policy = next_policy->sub_policys[domain][0];
17180                 tbl_data = container_of(next_sub_policy->tbl_rsc,
17181                                         struct mlx5_flow_tbl_data_entry, tbl);
17182                 act_cnt = &mtr_policy->act_cnt[i];
17183                 if (mtr_first) {
17184                         acts.dv_actions[0] = next_fm->meter_action;
17185                         acts.dv_actions[1] = act_cnt->modify_hdr->action;
17186                 } else {
17187                         acts.dv_actions[0] = act_cnt->modify_hdr->action;
17188                         acts.dv_actions[1] = next_fm->meter_action;
17189                 }
17190                 acts.dv_actions[2] = tbl_data->jump.action;
17191                 acts.actions_n = 3;
17192                 if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
17193                         next_fm = NULL;
17194                         goto err_exit;
17195                 }
17196                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
17197                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
17198                                 &attr, true, item,
17199                                 &color_rule->matcher, error)) {
17200                         rte_flow_error_set(error, errno,
17201                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17202                                 "Failed to create hierarchy meter matcher.");
17203                         goto err_exit;
17204                 }
17205                 if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
17206                                         (enum rte_color)i,
17207                                         color_rule->matcher->matcher_object,
17208                                         acts.actions_n, acts.dv_actions,
17209                                         true, item,
17210                                         &color_rule->rule, &attr)) {
17211                         rte_flow_error_set(error, errno,
17212                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17213                                 "Failed to create hierarchy meter rule.");
17214                         goto err_exit;
17215                 }
17216                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
17217                                   color_rule, next_port);
17218         }
17219 exit:
17220         /*
17221          * Recursive call to iterate all meters in the hierarchy and
17222          * create the needed rules.
17223          */
17224         return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
17225                                                 src_port, item, error);
17226 err_exit:
17227         if (color_rule) {
17228                 if (color_rule->rule)
17229                         mlx5_flow_os_destroy_flow(color_rule->rule);
17230                 if (color_rule->matcher) {
17231                         struct mlx5_flow_tbl_data_entry *tbl =
17232                                 container_of(color_rule->matcher->tbl,
17233                                                 typeof(*tbl), tbl);
17234                         mlx5_list_unregister(tbl->matchers,
17235                                                 &color_rule->matcher->entry);
17236                 }
17237                 mlx5_free(color_rule);
17238         }
17239         if (next_fm)
17240                 mlx5_flow_meter_detach(priv, next_fm);
17241         return -rte_errno;
17242 }
17243
17244 /**
17245  * Destroy the sub policy table with RX queue.
17246  *
17247  * @param[in] dev
17248  *   Pointer to Ethernet device.
17249  * @param[in] mtr_policy
17250  *   Pointer to meter policy table.
17251  */
17252 static void
17253 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
17254                                     struct mlx5_flow_meter_policy *mtr_policy)
17255 {
17256         struct mlx5_priv *priv = dev->data->dev_private;
17257         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17258         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17259         uint32_t i, j;
17260         uint16_t sub_policy_num, new_policy_num;
17261
17262         rte_spinlock_lock(&mtr_policy->sl);
17263         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17264                 switch (mtr_policy->act_cnt[i].fate_action) {
17265                 case MLX5_FLOW_FATE_SHARED_RSS:
17266                         sub_policy_num = (mtr_policy->sub_policy_num >>
17267                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17268                         MLX5_MTR_SUB_POLICY_NUM_MASK;
17269                         new_policy_num = sub_policy_num;
17270                         for (j = 0; j < sub_policy_num; j++) {
17271                                 sub_policy =
17272                                         mtr_policy->sub_policys[domain][j];
17273                                 if (sub_policy) {
17274                                         __flow_dv_destroy_sub_policy_rules(dev,
17275                                                 sub_policy);
17276                                         if (sub_policy !=
17277                                             mtr_policy->sub_policys[domain][0]) {
17278                                                 mtr_policy->sub_policys[domain][j] =
17279                                                                         NULL;
17280                                                 mlx5_ipool_free
17281                                         (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17282                                                         sub_policy->idx);
17283                                                 new_policy_num--;
17284                                         }
17285                                 }
17286                         }
17287                         if (new_policy_num != sub_policy_num) {
17288                                 mtr_policy->sub_policy_num &=
17289                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17290                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17291                                 mtr_policy->sub_policy_num |=
17292                                 (new_policy_num &
17293                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17294                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17295                         }
17296                         break;
17297                 case MLX5_FLOW_FATE_QUEUE:
17298                         sub_policy = mtr_policy->sub_policys[domain][0];
17299                         __flow_dv_destroy_sub_policy_rules(dev,
17300                                                            sub_policy);
17301                         break;
17302                 default:
17303                         /*Other actions without queue and do nothing*/
17304                         break;
17305                 }
17306         }
17307         rte_spinlock_unlock(&mtr_policy->sl);
17308 }
/**
 * Check whether the DR drop action is supported on the root table or not.
 *
 * Create a simple flow with the DR drop action on the root table to check
 * whether it is supported there.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_dv_match_params mask = {
		.size = sizeof(mask.buf),
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.priority = 0,
		.match_criteria_enable = 0,
		.match_mask = (void *)&mask,
	};
	struct mlx5_flow_tbl_resource *tbl = NULL;
	void *matcher = NULL;
	void *flow = NULL;
	int ret = -1;

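	/*
	 * Probe sequence: take the root table (level 0), build an empty
	 * matcher on it, then try to attach the shared DR drop action.
	 * Only the final flow creation tells whether the action is
	 * supported; the errno is inspected at the err: label.
	 */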
	tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
					0, 0, 0, NULL);
	if (!tbl)
		goto err;
	dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
	__flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
	ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
					       tbl->obj, &matcher);
	if (ret)
		goto err;
	__flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
	ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
				       &sh->dr_drop_action, &flow);
err:
	/*
	 * If the DR drop action is not supported on the root table, flow
	 * creation fails with EOPNOTSUPP or EPROTONOSUPPORT.
	 */
	if (!flow) {
		if (matcher &&
		    (errno == EPROTONOSUPPORT || errno == EOPNOTSUPP))
			DRV_LOG(INFO, "DR drop action is not supported in root table.");
		else
			DRV_LOG(ERR, "Unexpected error in DR drop action support detection");
		ret = -1;
	} else {
		claim_zero(mlx5_flow_os_destroy_flow(flow));
	}
	if (matcher)
		claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
	if (tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
	return ret;
}


/**
 * Validate the batch counter support in the root table.
 *
 * Create a simple flow with an invalid counter and a drop action on the
 * root table to check whether a batch counter with offset is supported
 * there.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_dv_match_params mask = {
		.size = sizeof(mask.buf),
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
		.priority = 0,
		.match_criteria_enable = 0,
		.match_mask = (void *)&mask,
	};
	void *actions[2] = { 0 };
	struct mlx5_flow_tbl_resource *tbl = NULL;
	struct mlx5_devx_obj *dcs = NULL;
	void *matcher = NULL;
	void *flow = NULL;
	int ret = -1;

	tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
					0, 0, 0, NULL);
	if (!tbl)
		goto err;
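	/*
	 * Allocate a bulk of counters, then build a count action that
	 * references them through a deliberately invalid offset
	 * (UINT16_MAX). A driver that validates batch counter offsets
	 * will reject the flow creation below.
	 */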
	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
	if (!dcs)
		goto err;
	ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
						    &actions[0]);
	if (ret)
		goto err;
	dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
	__flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
	ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
					       tbl->obj, &matcher);
	if (ret)
		goto err;
	__flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
	ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
				       actions, &flow);
err:
	/*
	 * If a batch counter with offset is not supported, the driver does
	 * not validate the invalid offset value and flow creation succeeds.
	 * In that case the batch counter is not supported in the root table.
	 *
	 * Otherwise, if flow creation fails, the counter offset is supported.
	 */
	if (flow) {
		DRV_LOG(INFO, "Batch counter is not supported in root "
			      "table. Switch to fallback mode.");
		rte_errno = ENOTSUP;
		ret = -rte_errno;
		claim_zero(mlx5_flow_os_destroy_flow(flow));
	} else {
		/* Check the matcher to make sure validation failed at flow create. */
		if (!matcher || errno != EINVAL)
			DRV_LOG(ERR, "Unexpected error in counter offset "
				     "support detection");
		ret = 0;
	}
	if (actions[0])
		claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
	if (matcher)
		claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
	if (tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
	if (dcs)
		claim_zero(mlx5_devx_cmd_destroy(dcs));
	return ret;
}


/**
 * Query a devx counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the flow counter.
 * @param[in] clear
 *   Set to clear the counter statistics.
 * @param[out] pkts
 *   The statistics value of packets.
 * @param[out] bytes
 *   The statistics value of bytes.
 *
 * @return
 *   0 on success, otherwise -1.
 */
static int
flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
		      uint64_t *pkts, uint64_t *bytes)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter *cnt;
	uint64_t inn_pkts, inn_bytes;
	int ret;

	if (!priv->sh->cdev->config.devx)
		return -1;

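	/*
	 * Read the raw counter values from HW; cnt->hits and cnt->bytes
	 * cache the values at the last clear, so only the delta since
	 * then is reported back.
	 */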
	ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
	if (ret)
		return -1;
	cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
	*pkts = inn_pkts - cnt->hits;
	*bytes = inn_bytes - cnt->bytes;
	if (clear) {
		cnt->hits = inn_pkts;
		cnt->bytes = inn_bytes;
	}
	return 0;
}


/**
 * Get aged-out flows.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] context
 *   The address of an array of pointers to the aged-out flow contexts.
 * @param[in] nb_contexts
 *   The length of the context array.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   The number of contexts returned on success, otherwise a negative
 *   errno value. If nb_contexts is 0, return the total number of aged
 *   contexts. If nb_contexts is not 0, return the number of aged flows
 *   reported in the context array.
 */
static int
flow_dv_get_aged_flows(struct rte_eth_dev *dev,
		       void **context,
		       uint32_t nb_contexts,
		       struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_flow_counter *counter;
	struct mlx5_aso_age_action *act;
	int nb_flows = 0;

	if (nb_contexts && !context)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "empty context");
	age_info = GET_PORT_AGE_INFO(priv);
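	/*
	 * Aged-out flows are collected on two lists: ASO age actions
	 * and age-enabled counters. Walk both under the aging lock.
	 */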
	rte_spinlock_lock(&age_info->aged_sl);
	LIST_FOREACH(act, &age_info->aged_aso, next) {
		nb_flows++;
		if (nb_contexts) {
			context[nb_flows - 1] = act->age_params.context;
			if (!(--nb_contexts))
				break;
		}
	}
	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
		nb_flows++;
		if (nb_contexts) {
			age_param = MLX5_CNT_TO_AGE(counter);
			context[nb_flows - 1] = age_param->context;
			if (!(--nb_contexts))
				break;
		}
	}
	rte_spinlock_unlock(&age_info->aged_sl);
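	/* Re-arm the trigger so a new aging event can be raised later. */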
	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	return nb_flows;
}

/*
 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
 */
static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
	return flow_dv_counter_alloc(dev, 0);
}

/**
 * Validate an indirect action.
 * Dispatcher for action type specific validation.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] conf
 *   Indirect action configuration.
 * @param[in] action
 *   The indirect action object to validate.
 * @param[out] err
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise a negative errno value.
 */
static int
flow_dv_action_validate(struct rte_eth_dev *dev,
			const struct rte_flow_indir_action_conf *conf,
			const struct rte_flow_action *action,
			struct rte_flow_error *err)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	RTE_SET_USED(conf);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_RSS:
		/*
		 * priv->obj_ops is set according to driver capabilities.
		 * When DevX capabilities are sufficient, it is set to
		 * devx_obj_ops; otherwise, it is set to ibv_obj_ops.
		 * ibv_obj_ops doesn't support the ind_table_modify operation,
		 * so the indirect RSS action can't be used in that case.
		 */
		if (priv->obj_ops.ind_table_modify == NULL)
			return rte_flow_error_set
					(err, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
					 "Indirect RSS action not supported");
		return mlx5_validate_action_rss(dev, action, err);
	case RTE_FLOW_ACTION_TYPE_AGE:
		if (!priv->sh->aso_age_mng)
			return rte_flow_error_set(err, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						NULL,
						"Indirect age action not supported");
		return flow_dv_validate_action_age(0, action, dev, err);
	case RTE_FLOW_ACTION_TYPE_COUNT:
		return flow_dv_validate_action_count(dev, true, 0, err);
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		if (!priv->sh->ct_aso_en)
			return rte_flow_error_set(err, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					"ASO CT is not supported");
		return mlx5_validate_action_ct(dev, action->conf, err);
	default:
		return rte_flow_error_set(err, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "action type not supported");
	}
}

/*
 * Check if the RSS configurations for colors of a meter policy match
 * each other, except the queues.
 *
 * @param[in] r1
 *   Pointer to the first RSS flow action.
 * @param[in] r2
 *   Pointer to the second RSS flow action.
 *
 * @return
 *   0 on match, 1 on conflict.
 */
static inline int
flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1,
			       const struct rte_flow_action_rss *r2)
{
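	/*
	 * Two configurations are considered matching when they use the
	 * same tunnel level and hash key, and their types are equal or
	 * both fall into {0, RTE_ETH_RSS_IP}; the queue lists may differ.
	 */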
	if (r1 == NULL || r2 == NULL)
		return 0;
	if (!(r1->level <= 1 && r2->level <= 1) &&
	    !(r1->level > 1 && r2->level > 1))
		return 1;
	if (r1->types != r2->types &&
	    !((r1->types == 0 || r1->types == RTE_ETH_RSS_IP) &&
	      (r2->types == 0 || r2->types == RTE_ETH_RSS_IP)))
		return 1;
	if (r1->key || r2->key) {
		const void *key1 = r1->key ? r1->key : rss_hash_default_key;
		const void *key2 = r2->key ? r2->key : rss_hash_default_key;

		if (memcmp(key1, key2, MLX5_RSS_HASH_KEY_LEN))
			return 1;
	}
	return 0;
}

/**
 * Validate the meter hierarchy chain for meter policy.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] meter_id
 *   Meter id.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[out] is_rss
 *   Is RSS or not.
 * @param[out] hierarchy_domain
 *   The domain bitmap for hierarchy policy.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value with error set.
 */
static int
flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
				      uint32_t meter_id,
				      uint64_t action_flags,
				      bool *is_rss,
				      uint8_t *hierarchy_domain,
				      struct rte_mtr_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_info *fm;
	struct mlx5_flow_meter_policy *policy;
	uint8_t cnt = 1;

	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
		return -rte_mtr_error_set(error, EINVAL,
					RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
					NULL,
					"Multiple fate actions not supported.");
	*hierarchy_domain = 0;
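	/*
	 * Walk down the meter chain until the first policy that
	 * terminates with a fate action, bounded by
	 * MLX5_MTR_CHAIN_MAX_NUM to avoid endless loops.
	 */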
	while (true) {
		fm = mlx5_flow_meter_find(priv, meter_id, NULL);
		if (!fm)
			return -rte_mtr_error_set(error, EINVAL,
						RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
					"Meter not found in meter hierarchy.");
		if (fm->def_policy)
			return -rte_mtr_error_set(error, EINVAL,
					RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
			"Non termination meter not supported in hierarchy.");
		policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
		MLX5_ASSERT(policy);
		/*
		 * Only inherit the supported domains of the first meter in
		 * hierarchy.
		 * One meter supports at least one domain.
		 */
		if (!*hierarchy_domain) {
			if (policy->transfer)
				*hierarchy_domain |=
						MLX5_MTR_DOMAIN_TRANSFER_BIT;
			if (policy->ingress)
				*hierarchy_domain |=
						MLX5_MTR_DOMAIN_INGRESS_BIT;
			if (policy->egress)
				*hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
		}
		if (!policy->is_hierarchy) {
			*is_rss = policy->is_rss;
			break;
		}
		meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
		if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
			return -rte_mtr_error_set(error, EINVAL,
					RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
					"Exceed max hierarchy meter number.");
	}
	return 0;
}


/**
 * Validate meter policy actions.
 * Dispatcher for action type specific validation.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] actions
 *   The meter policy actions to validate, one list per color.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] is_rss
 *   Set to true when an RSS action is used in the policy.
 * @param[out] domain_bitmap
 *   The bitmap of the steering domains the policy can be applied to.
 * @param[out] policy_mode
 *   The mode of the policy: default, only-green, only-yellow or all.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
			const struct rte_flow_action *actions[RTE_COLORS],
			struct rte_flow_attr *attr,
			bool *is_rss,
			uint8_t *domain_bitmap,
			uint8_t *policy_mode,
			struct rte_mtr_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_sh_config *dev_conf = &priv->sh->config;
	const struct rte_flow_action *act;
	uint64_t action_flags[RTE_COLORS] = {0};
	int actions_n;
	int i, ret;
	struct rte_flow_error flow_err;
	uint8_t domain_color[RTE_COLORS] = {0};
	uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
	uint8_t hierarchy_domain = 0;
	const struct rte_flow_action_meter *mtr;
	bool def_green = false;
	bool def_yellow = false;
	const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};

	if (!dev_conf->dv_esw_en)
		def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
	*domain_bitmap = def_domain;
	/* The red color only supports the DROP action. */
	if (!actions[RTE_COLOR_RED] ||
	    actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
		return -rte_mtr_error_set(error, ENOTSUP,
				RTE_MTR_ERROR_TYPE_METER_POLICY,
				NULL, "Red color only supports drop action.");
	/*
	 * Check default policy actions:
	 * Green / Yellow: no action, Red: drop action.
	 * Either G or Y being empty triggers the default policy actions
	 * to be created.
	 */
	if (!actions[RTE_COLOR_GREEN] ||
	    actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)
		def_green = true;
	if (!actions[RTE_COLOR_YELLOW] ||
	    actions[RTE_COLOR_YELLOW]->type == RTE_FLOW_ACTION_TYPE_END)
		def_yellow = true;
	if (def_green && def_yellow) {
		*policy_mode = MLX5_MTR_POLICY_MODE_DEF;
		return 0;
	} else if (!def_green && def_yellow) {
		*policy_mode = MLX5_MTR_POLICY_MODE_OG;
	} else if (def_green && !def_yellow) {
		*policy_mode = MLX5_MTR_POLICY_MODE_OY;
	} else {
		*policy_mode = MLX5_MTR_POLICY_MODE_ALL;
	}
	/* Set to an empty string in case of a NULL pointer access by the user. */
	flow_err.message = "";
	for (i = 0; i < RTE_COLORS; i++) {
		act = actions[i];
		for (action_flags[i] = 0, actions_n = 0;
		     act && act->type != RTE_FLOW_ACTION_TYPE_END;
		     act++) {
			if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
				return -rte_mtr_error_set(error, ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "too many actions");
			switch (act->type) {
			case RTE_FLOW_ACTION_TYPE_PORT_ID:
			case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
				if (!dev_conf->dv_esw_en)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "PORT action validate check"
					" fail for ESW disable");
				ret = flow_dv_validate_action_port_id(dev,
						action_flags[i],
						act, attr, &flow_err);
				if (ret)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, flow_err.message ?
					flow_err.message :
					"PORT action validate check fail");
				++actions_n;
				action_flags[i] |= MLX5_FLOW_ACTION_PORT_ID;
				break;
			case RTE_FLOW_ACTION_TYPE_MARK:
				ret = flow_dv_validate_action_mark(dev, act,
							   action_flags[i],
							   attr, &flow_err);
				if (ret < 0)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, flow_err.message ?
					flow_err.message :
					"Mark action validate check fail");
				if (dev_conf->dv_xmeta_en !=
					MLX5_XMETA_MODE_LEGACY)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "Extended MARK action is "
					"not supported. Please try to use "
					"the default policy for the meter.");
				action_flags[i] |= MLX5_FLOW_ACTION_MARK;
				++actions_n;
				break;
			case RTE_FLOW_ACTION_TYPE_SET_TAG:
				ret = flow_dv_validate_action_set_tag(dev,
							act, action_flags[i],
							attr, &flow_err);
				if (ret)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, flow_err.message ?
					flow_err.message :
					"Set tag action validate check fail");
				action_flags[i] |= MLX5_FLOW_ACTION_SET_TAG;
				++actions_n;
				break;
			case RTE_FLOW_ACTION_TYPE_DROP:
				ret = mlx5_flow_validate_action_drop
					(action_flags[i], attr, &flow_err);
				if (ret < 0)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, flow_err.message ?
					flow_err.message :
					"Drop action validate check fail");
				action_flags[i] |= MLX5_FLOW_ACTION_DROP;
				++actions_n;
				break;
			case RTE_FLOW_ACTION_TYPE_QUEUE:
				/*
				 * Check whether the extensive metadata
				 * feature is engaged.
				 */
				if (dev_conf->dv_flow_en &&
				    (dev_conf->dv_xmeta_en !=
				     MLX5_XMETA_MODE_LEGACY) &&
				    mlx5_flow_ext_mreg_supported(dev))
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "Queue action with meta "
					  "is not supported. Please try to use "
					  "the default policy for the meter.");
				ret = mlx5_flow_validate_action_queue(act,
							action_flags[i], dev,
							attr, &flow_err);
				if (ret < 0)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, flow_err.message ?
					  flow_err.message :
					  "Queue action validate check fail");
				action_flags[i] |= MLX5_FLOW_ACTION_QUEUE;
				++actions_n;
				break;
			case RTE_FLOW_ACTION_TYPE_RSS:
				if (dev_conf->dv_flow_en &&
				    (dev_conf->dv_xmeta_en !=
				     MLX5_XMETA_MODE_LEGACY) &&
				    mlx5_flow_ext_mreg_supported(dev))
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "RSS action with meta "
					  "is not supported. Please try to use "
					  "the default policy for the meter.");
				ret = mlx5_validate_action_rss(dev, act,
							       &flow_err);
				if (ret < 0)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, flow_err.message ?
					  flow_err.message :
					  "RSS action validate check fail");
				action_flags[i] |= MLX5_FLOW_ACTION_RSS;
				++actions_n;
				/* Either G or Y will set the RSS. */
				rss_color[i] = act->conf;
				break;
			case RTE_FLOW_ACTION_TYPE_JUMP:
				ret = flow_dv_validate_action_jump(dev,
					NULL, act, action_flags[i],
					attr, true, &flow_err);
				if (ret)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, flow_err.message ?
					  flow_err.message :
					  "Jump action validate check fail");
				++actions_n;
				action_flags[i] |= MLX5_FLOW_ACTION_JUMP;
				break;
			/*
			 * Only the last meter in the hierarchy supports the
			 * YELLOW color steering, so a policy that chains to
			 * another meter must not provide its own yellow
			 * actions.
			 */
			case RTE_FLOW_ACTION_TYPE_METER:
				if (i != RTE_COLOR_GREEN)
					return -rte_mtr_error_set(error,
						ENOTSUP,
						RTE_MTR_ERROR_TYPE_METER_POLICY,
						NULL,
						"Meter hierarchy only supports GREEN color.");
				if (*policy_mode != MLX5_MTR_POLICY_MODE_OG)
					return -rte_mtr_error_set(error,
						ENOTSUP,
						RTE_MTR_ERROR_TYPE_METER_POLICY,
						NULL,
						"No yellow policy should be provided in meter hierarchy.");
				mtr = act->conf;
				ret = flow_dv_validate_policy_mtr_hierarchy(dev,
							mtr->mtr_id,
							action_flags[i],
							is_rss,
							&hierarchy_domain,
							error);
				if (ret)
					return ret;
				++actions_n;
				action_flags[i] |=
				MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
				break;
			default:
				return -rte_mtr_error_set(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL,
					"Unsupported action in meter policy.");
			}
		}
		if (action_flags[i] & MLX5_FLOW_ACTION_PORT_ID) {
			domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
		} else if ((action_flags[i] &
			  (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
			  (action_flags[i] & MLX5_FLOW_ACTION_MARK)) {
			/*
			 * Only MLX5_XMETA_MODE_LEGACY is supported here,
			 * so the MARK action is only valid in the ingress
			 * domain.
			 */
			domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
		} else {
			domain_color[i] = def_domain;
			if (action_flags[i] &&
			    !(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
				domain_color[i] &=
					~MLX5_MTR_DOMAIN_TRANSFER_BIT;
		}
		if (action_flags[i] &
		    MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
			domain_color[i] &= hierarchy_domain;
		/*
		 * Non-termination actions only support the NIC Tx domain.
		 * The adjustment should be skipped when there is no action
		 * or only END is provided. The default domain bit-mask is
		 * set to find the MIN intersection.
		 * The action flags checking should also be skipped.
		 */
		if ((def_green && i == RTE_COLOR_GREEN) ||
		    (def_yellow && i == RTE_COLOR_YELLOW))
			continue;
		/*
		 * Validate the drop action mutual exclusion
		 * with other actions. Drop action is mutually-exclusive
		 * with any other action, except for Count action.
		 */
		if ((action_flags[i] & MLX5_FLOW_ACTION_DROP) &&
		    (action_flags[i] & ~MLX5_FLOW_ACTION_DROP)) {
			return -rte_mtr_error_set(error, ENOTSUP,
				RTE_MTR_ERROR_TYPE_METER_POLICY,
				NULL, "Drop action is mutually-exclusive "
				"with any other action");
		}
		/* E-Switch has a few restrictions on using items and actions. */
		if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
			if (!mlx5_flow_ext_mreg_supported(dev) &&
			    action_flags[i] & MLX5_FLOW_ACTION_MARK)
				return -rte_mtr_error_set(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "unsupported action MARK");
			if (action_flags[i] & MLX5_FLOW_ACTION_QUEUE)
				return -rte_mtr_error_set(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "unsupported action QUEUE");
			if (action_flags[i] & MLX5_FLOW_ACTION_RSS)
				return -rte_mtr_error_set(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "unsupported action RSS");
			if (!(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
				return -rte_mtr_error_set(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "no fate action is found");
		} else {
			if (!(action_flags[i] & MLX5_FLOW_FATE_ACTIONS) &&
			    (domain_color[i] & MLX5_MTR_DOMAIN_INGRESS_BIT)) {
				if ((domain_color[i] &
				     MLX5_MTR_DOMAIN_EGRESS_BIT))
					domain_color[i] =
						MLX5_MTR_DOMAIN_EGRESS_BIT;
				else
					return -rte_mtr_error_set(error,
						ENOTSUP,
						RTE_MTR_ERROR_TYPE_METER_POLICY,
						NULL,
						"no fate action is found");
			}
		}
	}
	/* If both colors have RSS, the attributes should be the same. */
	if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN],
					   rss_color[RTE_COLOR_YELLOW]))
		return -rte_mtr_error_set(error, EINVAL,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "policy RSS attr conflict");
	if (rss_color[RTE_COLOR_GREEN] || rss_color[RTE_COLOR_YELLOW])
		*is_rss = true;
	/* "domain_color[C]" is non-zero for each color, the default is ALL. */
	if (!def_green && !def_yellow &&
	    domain_color[RTE_COLOR_GREEN] != domain_color[RTE_COLOR_YELLOW] &&
	    !(action_flags[RTE_COLOR_GREEN] & MLX5_FLOW_ACTION_DROP) &&
	    !(action_flags[RTE_COLOR_YELLOW] & MLX5_FLOW_ACTION_DROP))
		return -rte_mtr_error_set(error, EINVAL,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "policy domains conflict");
	/*
	 * At least one color policy is listed in the actions, so the
	 * domains to be supported are the intersection.
	 */
	*domain_bitmap = domain_color[RTE_COLOR_GREEN] &
			 domain_color[RTE_COLOR_YELLOW];
	return 0;
}

static int
flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;

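	/* Flush only the requested domains that this port has created. */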
	if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
		ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
						  flags);
		if (ret != 0)
			return ret;
	}
	if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
		ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
		if (ret != 0)
			return ret;
	}
	if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
		ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
		if (ret != 0)
			return ret;
	}
	return 0;
}

/**
 * Discover the number of available flow priorities
 * by trying to create a flow with the highest priority value
 * for each possible number.
 *
 * @param[in] dev
 *   Ethernet device.
 * @param[in] vprio
 *   List of possible number of available priorities.
 * @param[in] vprio_n
 *   Size of @p vprio array.
 * @return
 *   On success, number of available flow priorities.
 *   On failure, a negative errno-style code and rte_errno is set.
 */
static int
flow_dv_discover_priorities(struct rte_eth_dev *dev,
			    const uint16_t *vprio, int vprio_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_indexed_pool *pool = priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW];
	struct rte_flow_item_eth eth;
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &eth,
		.mask = &eth,
	};
	struct mlx5_flow_dv_matcher matcher = {
		.mask = {
			.size = sizeof(matcher.mask.buf),
		},
	};
	union mlx5_flow_tbl_key tbl_key;
	struct mlx5_flow flow;
	void *action;
	struct rte_flow_error error;
	uint8_t misc_mask;
	int i, err, ret = -ENOTSUP;

	/*
	 * Prepare a flow with a catch-all pattern and a drop action.
	 * Use drop queue, because shared drop action may be unavailable.
	 */
	action = priv->drop_queue.hrxq->action;
	if (action == NULL) {
		DRV_LOG(ERR, "Priority discovery requires a drop action");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	memset(&flow, 0, sizeof(flow));
	flow.handle = mlx5_ipool_zmalloc(pool, &flow.handle_idx);
	if (flow.handle == NULL) {
		DRV_LOG(ERR, "Cannot create flow handle");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	flow.ingress = true;
	flow.dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
	flow.dv.actions[0] = action;
	flow.dv.actions_n = 1;
	memset(&eth, 0, sizeof(eth));
	flow_dv_translate_item_eth(matcher.mask.buf, flow.dv.value.buf,
				   &item, /* inner */ false, /* group */ 0);
	matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);
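	/*
	 * Probe each candidate: a device supporting N priorities accepts
	 * a flow at priority N - 1 and rejects anything higher, so the
	 * last successful candidate is the discovered number.
	 */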
	for (i = 0; i < vprio_n; i++) {
		/* Configure the next proposed maximum priority. */
		matcher.priority = vprio[i] - 1;
		memset(&tbl_key, 0, sizeof(tbl_key));
		err = flow_dv_matcher_register(dev, &matcher, &tbl_key, &flow,
					       /* tunnel */ NULL,
					       /* group */ 0,
					       &error);
		if (err != 0) {
			/* Matcher registration is pure SW and must always succeed. */
			DRV_LOG(ERR, "Cannot register matcher");
			ret = -rte_errno;
			break;
		}
		/* Try to apply the flow to HW. */
		misc_mask = flow_dv_matcher_enable(flow.dv.value.buf);
		__flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask);
		err = mlx5_flow_os_create_flow
				(flow.handle->dvh.matcher->matcher_object,
				 (void *)&flow.dv.value, flow.dv.actions_n,
				 flow.dv.actions, &flow.handle->drv_flow);
		if (err == 0) {
			claim_zero(mlx5_flow_os_destroy_flow
						(flow.handle->drv_flow));
			flow.handle->drv_flow = NULL;
		}
		claim_zero(flow_dv_matcher_release(dev, flow.handle));
		if (err != 0)
			break;
		ret = vprio[i];
	}
	mlx5_ipool_free(pool, flow.handle_idx);
	/* Set rte_errno if no expected priority value matched. */
	if (ret < 0)
		rte_errno = -ret;
	return ret;
}

const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	.create_mtr_tbls = flow_dv_create_mtr_tbls,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
	.destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
	.create_meter = flow_dv_mtr_alloc,
	.free_meter = flow_dv_aso_mtr_release_to_pool,
	.validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
	.create_mtr_acts = flow_dv_create_mtr_policy_acts,
	.destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
	.create_policy_rules = flow_dv_create_policy_rules,
	.destroy_policy_rules = flow_dv_destroy_policy_rules,
	.create_def_policy = flow_dv_create_def_policy,
	.destroy_def_policy = flow_dv_destroy_def_policy,
	.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
	.meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
	.destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
	.get_aged_flows = flow_dv_get_aged_flows,
	.action_validate = flow_dv_action_validate,
	.action_create = flow_dv_action_create,
	.action_destroy = flow_dv_action_destroy,
	.action_update = flow_dv_action_update,
	.action_query = flow_dv_action_query,
	.sync_domain = flow_dv_sync_domain,
	.discover_priorities = flow_dv_discover_priorities,
	.item_create = flow_dv_item_create,
	.item_release = flow_dv_item_release,
};

#endif /* HAVE_IBV_FLOW_DV_SUPPORT || !HAVE_INFINIBAND_VERBS_H */